From e785b8196b0fc1d5e588cb8d731b0118ee1e689a Mon Sep 17 00:00:00 2001 From: Vallari Agrawal Date: Mon, 13 Jun 2022 02:20:50 +0530 Subject: [PATCH] orch/run: Scan teuthology log files for unit test errors - Scan teuthology log files from bottom up - Keep a flag to keep track of line-number till which logs were last scanned Do not scan above this flag index. - Raise UnitTestError with error message found in log files Signed-off-by: Vallari Agrawal --- .coveragerc | 2 + .dockerignore | 3 + .github/workflows/ci.yml | 30 + .github/workflows/dependencies.yml | 47 + .github/workflows/integration.yml | 12 + .gitignore | 30 + .gitlab-ci.yml | 3 + .readthedocs.yml | 19 + .travis.yml | 17 + LICENSE | 19 + MANIFEST.in | 4 + README.rst | 11 + beanstalk/alpine/Dockerfile | 13 + bootstrap | 172 ++ build_qemu_image.sh | 61 + docker-compose.yml | 81 + docs/COMPONENTS.rst | 70 + docs/ChangeLog.rst | 6 + docs/INSTALL.rst | 119 ++ docs/LAB_SETUP.rst | 142 ++ docs/Makefile | 180 ++ docs/README.rst | 150 ++ docs/_static/create_nodes.py | 65 + docs/_static/nginx_paddles | 11 + docs/_static/nginx_pulpito | 11 + docs/_static/nginx_test_logs | 7 + docs/_static/worker_start.sh | 40 + .../ceph/static/font/ApexSans-Book.eot | Bin 0 -> 199888 bytes .../ceph/static/font/ApexSans-Book.svg | 1 + .../ceph/static/font/ApexSans-Book.ttf | Bin 0 -> 199616 bytes .../ceph/static/font/ApexSans-Book.woff | Bin 0 -> 64736 bytes .../ceph/static/font/ApexSans-Medium.eot | Bin 0 -> 169448 bytes .../ceph/static/font/ApexSans-Medium.svg | 1 + .../ceph/static/font/ApexSans-Medium.ttf | Bin 0 -> 169168 bytes .../ceph/static/font/ApexSans-Medium.woff | Bin 0 -> 61116 bytes docs/_themes/ceph/static/nature.css_t | 325 ++++ docs/_themes/ceph/theme.conf | 4 + docs/cephlab.png | Bin 0 -> 36032 bytes docs/cephlab.svg | 3 + docs/commands/list.rst | 9 + docs/commands/teuthology-describe.rst | 4 + docs/commands/teuthology-dispatcher.rst | 9 + docs/commands/teuthology-kill.rst | 4 + docs/commands/teuthology-lock.rst | 4 + docs/commands/teuthology-ls.rst | 4 + docs/commands/teuthology-nuke.rst | 4 + docs/commands/teuthology-openstack.rst | 4 + docs/commands/teuthology-prune-logs.rst | 4 + docs/commands/teuthology-queue.rst | 4 + docs/commands/teuthology-reimage.rst | 4 + docs/commands/teuthology-report.rst | 4 + docs/commands/teuthology-results.rst | 4 + docs/commands/teuthology-schedule.rst | 4 + docs/commands/teuthology-suite.rst | 4 + docs/commands/teuthology-update-inventory.rst | 4 + docs/commands/teuthology-updatekeys.rst | 4 + docs/commands/teuthology-wait.rst | 4 + docs/commands/teuthology-worker.rst | 4 + docs/commands/teuthology.rst | 4 + docs/conf.py | 262 +++ docs/detailed_test_config.rst | 315 +++ docs/docker-compose/README.md | 93 + docs/docker-compose/db/01-init.sh | 8 + docs/docker-compose/docker-compose.yml | 92 + docs/docker-compose/start.sh | 48 + docs/docker-compose/testnode/Dockerfile | 26 + .../docker-compose/testnode/testnode_start.sh | 13 + docs/docker-compose/testnode/testnode_stop.sh | 10 + docs/docker-compose/testnode/testnode_sudoers | 4 + .../teuthology/.teuthology.yaml | 9 + docs/docker-compose/teuthology/Dockerfile | 43 + .../teuthology/containerized_node.yaml | 8 + docs/docker-compose/teuthology/teuthology.sh | 46 + docs/downburst_vms.rst | 89 + docs/fragment_merging.rst | 318 ++++ docs/index.rst | 25 + docs/intro_testers.rst | 81 + docs/laptop/README.md | 434 +++++ docs/laptop/default-pool.xml | 7 + docs/laptop/front.xml | 15 + docs/laptop/hosts | 7 + docs/laptop/ssh_config | 6 + docs/laptop/targets.sql | 9 + 
docs/laptop/teuthology.yaml | 30 + docs/libcloud_backend.rst | 43 + docs/openstack_backend.rst | 214 +++ docs/requirements.txt | 3 + docs/siteconfig.rst | 248 +++ examples/3node_ceph.yaml | 15 + examples/3node_rgw.yaml | 24 + examples/parallel_example.yaml | 20 + hammer.sh | 32 + openstack-delegate.sh | 7 + pyproject.toml | 10 + pytest.ini | 5 + requirements.txt | 384 ++++ roles/3-simple.yaml | 4 + roles/overrides.yaml | 10 + scripts/__init__.py | 0 scripts/describe.py | 79 + scripts/dispatcher.py | 35 + scripts/kill.py | 44 + scripts/lock.py | 181 ++ scripts/ls.py | 19 + scripts/nuke.py | 47 + scripts/openstack.py | 409 ++++ scripts/prune_logs.py | 38 + scripts/queue.py | 36 + scripts/reimage.py | 25 + scripts/report.py | 42 + scripts/results.py | 25 + scripts/run.py | 38 + scripts/schedule.py | 61 + scripts/suite.py | 219 +++ scripts/test/script.py | 16 + scripts/test/test_lock.py | 5 + scripts/test/test_ls.py | 15 + scripts/test/test_nuke.py | 5 + scripts/test/test_prune_logs.py | 5 + scripts/test/test_report.py | 5 + scripts/test/test_results.py | 5 + scripts/test/test_run.py | 45 + scripts/test/test_schedule.py | 5 + scripts/test/test_suite.py | 5 + scripts/test/test_updatekeys.py | 21 + scripts/test/test_worker.py | 5 + scripts/update_inventory.py | 41 + scripts/updatekeys.py | 31 + scripts/wait.py | 31 + scripts/worker.py | 37 + setup.cfg | 129 ++ teuthology/__init__.py | 109 ++ teuthology/beanstalk.py | 215 +++ teuthology/ceph.conf.template | 101 + teuthology/config.py | 288 +++ teuthology/contextutil.py | 145 ++ teuthology/describe_tests.py | 358 ++++ teuthology/dispatcher/__init__.py | 202 ++ teuthology/dispatcher/supervisor.py | 351 ++++ .../test_reimage_error_mark_machine_down.py | 104 + teuthology/exceptions.py | 231 +++ teuthology/exit.py | 78 + teuthology/job_status.py | 38 + teuthology/kill.py | 269 +++ teuthology/lock/__init__.py | 0 teuthology/lock/cli.py | 300 +++ teuthology/lock/ops.py | 449 +++++ teuthology/lock/query.py | 153 ++ teuthology/lock/test/__init__.py | 0 teuthology/lock/test/test_lock.py | 7 + teuthology/lock/util.py | 121 ++ teuthology/ls.py | 69 + teuthology/misc.py | 1385 ++++++++++++++ teuthology/nuke/__init__.py | 366 ++++ teuthology/nuke/actions.py | 460 +++++ teuthology/openstack/__init__.py | 1366 +++++++++++++ teuthology/openstack/archive-key | 27 + teuthology/openstack/archive-key.pub | 1 + teuthology/openstack/bootstrap-teuthology.sh | 33 + teuthology/openstack/openstack-basic.yaml | 15 + .../openstack/openstack-buildpackages.yaml | 10 + .../openstack-centos-6.5-user-data.txt | 24 + .../openstack-centos-7.0-user-data.txt | 21 + .../openstack-centos-7.1-user-data.txt | 21 + .../openstack-centos-7.2-user-data.txt | 21 + .../openstack-centos-7.3-user-data.txt | 1 + .../openstack-debian-7.0-user-data.txt | 1 + .../openstack-debian-8.0-user-data.txt | 24 + .../openstack-opensuse-15.0-user-data.txt | 26 + .../openstack-opensuse-15.1-user-data.txt | 1 + .../openstack-opensuse-42.1-user-data.txt | 27 + .../openstack-opensuse-42.2-user-data.txt | 28 + .../openstack-opensuse-42.3-user-data.txt | 27 + .../openstack-sle-12.1-user-data.txt | 25 + .../openstack-sle-12.2-user-data.txt | 27 + .../openstack-sle-12.3-user-data.txt | 24 + .../openstack-sle-15.0-user-data.txt | 25 + .../openstack-sle-15.1-user-data.txt | 37 + .../openstack/openstack-teuthology.cron | 2 + .../openstack/openstack-teuthology.init | 225 +++ .../openstack-ubuntu-12.04-user-data.txt | 23 + .../openstack-ubuntu-14.04-user-data.txt | 21 + .../openstack-ubuntu-16.04-user-data.txt | 21 + 
teuthology/openstack/openstack-user-data.txt | 22 + teuthology/openstack/setup-openstack.sh | 793 ++++++++ teuthology/openstack/test/__init__.py | 0 .../openstack/test/archive-on-error.yaml | 1 + teuthology/openstack/test/noop.yaml | 12 + .../openstack/test/openstack-integration.py | 286 +++ teuthology/openstack/test/resources_hint.yaml | 25 + .../test/resources_hint_no_cinder.yaml | 20 + teuthology/openstack/test/stop_worker.yaml | 1 + teuthology/openstack/test/suites/noop/+ | 0 .../openstack/test/suites/noop/noop.yaml | 9 + teuthology/openstack/test/suites/nuke/+ | 0 .../openstack/test/suites/nuke/nuke.yaml | 8 + teuthology/openstack/test/test_config.py | 35 + teuthology/openstack/test/test_openstack.py | 1695 +++++++++++++++++ teuthology/openstack/test/user-data-test1.txt | 5 + teuthology/orchestra/__init__.py | 0 teuthology/orchestra/cluster.py | 188 ++ teuthology/orchestra/connection.py | 110 ++ teuthology/orchestra/console.py | 388 ++++ teuthology/orchestra/daemon/__init__.py | 1 + teuthology/orchestra/daemon/cephadmunit.py | 177 ++ teuthology/orchestra/daemon/group.py | 180 ++ teuthology/orchestra/daemon/state.py | 171 ++ teuthology/orchestra/daemon/systemd.py | 229 +++ teuthology/orchestra/monkey.py | 56 + teuthology/orchestra/opsys.py | 241 +++ teuthology/orchestra/remote.py | 725 +++++++ teuthology/orchestra/run.py | 534 ++++++ teuthology/orchestra/test/__init__.py | 0 .../daemon-systemdstate-pid-ps-ef.output | 5 + .../orchestra/test/integration/__init__.py | 0 .../test/integration/test_integration.py | 94 + .../test/log_files/test_scan_gtest.log | 54 + .../test/log_files/test_scan_nose.log | 49 + teuthology/orchestra/test/test_cluster.py | 232 +++ teuthology/orchestra/test/test_connection.py | 119 ++ teuthology/orchestra/test/test_console.py | 217 +++ teuthology/orchestra/test/test_opsys.py | 404 ++++ teuthology/orchestra/test/test_remote.py | 205 ++ teuthology/orchestra/test/test_run.py | 286 +++ teuthology/orchestra/test/test_systemd.py | 54 + teuthology/orchestra/test/util.py | 12 + teuthology/packaging.py | 1063 +++++++++++ teuthology/parallel.py | 115 ++ teuthology/provision/__init__.py | 121 ++ teuthology/provision/cloud/__init__.py | 49 + teuthology/provision/cloud/base.py | 89 + teuthology/provision/cloud/openstack.py | 452 +++++ teuthology/provision/cloud/test/test_base.py | 90 + .../provision/cloud/test/test_cloud_init.py | 60 + .../provision/cloud/test/test_cloud_util.py | 172 ++ .../provision/cloud/test/test_openstack.py | 781 ++++++++ .../test/test_openstack_userdata_conf.yaml | 24 + teuthology/provision/cloud/util.py | 116 ++ teuthology/provision/downburst.py | 322 ++++ teuthology/provision/fog.py | 312 +++ teuthology/provision/openstack.py | 234 +++ teuthology/provision/pelagos.py | 173 ++ teuthology/provision/test/test_downburst.py | 105 + teuthology/provision/test/test_fog.py | 317 +++ .../provision/test/test_init_provision.py | 46 + teuthology/provision/test/test_pelagos.py | 46 + teuthology/prune.py | 237 +++ teuthology/reimage.py | 57 + teuthology/repo_utils.py | 461 +++++ teuthology/report.py | 584 ++++++ teuthology/results.py | 272 +++ teuthology/run.py | 414 ++++ teuthology/run_tasks.py | 360 ++++ teuthology/safepath.py | 42 + teuthology/schedule.py | 143 ++ teuthology/scrape.py | 515 +++++ teuthology/suite/__init__.py | 236 +++ teuthology/suite/build_matrix.py | 209 ++ teuthology/suite/fragment-merge.lua | 104 + teuthology/suite/matrix.py | 388 ++++ teuthology/suite/merge.py | 170 ++ teuthology/suite/placeholder.py | 109 ++ teuthology/suite/run.py | 707 
+++++++ teuthology/suite/test/suites/noop/noop.yaml | 7 + teuthology/suite/test/test_build_matrix.py | 815 ++++++++ teuthology/suite/test/test_init.py | 266 +++ teuthology/suite/test/test_matrix.py | 82 + teuthology/suite/test/test_merge.py | 233 +++ teuthology/suite/test/test_placeholder.py | 55 + teuthology/suite/test/test_run_.py | 425 +++++ teuthology/suite/test/test_util.py | 374 ++++ teuthology/suite/util.py | 499 +++++ teuthology/task/__init__.py | 136 ++ teuthology/task/ansible.py | 429 +++++ teuthology/task/args.py | 60 + teuthology/task/background_exec.py | 76 + teuthology/task/buildpackages.py | 245 +++ teuthology/task/buildpackages/Makefile | 84 + .../buildpackages/centos-6.5-user-data.txt | 15 + .../buildpackages/centos-7.0-user-data.txt | 1 + .../buildpackages/centos-7.1-user-data.txt | 1 + .../buildpackages/centos-7.2-user-data.txt | 1 + .../buildpackages/centos-7.3-user-data.txt | 1 + teuthology/task/buildpackages/common.sh | 169 ++ .../buildpackages/debian-8.0-user-data.txt | 12 + teuthology/task/buildpackages/make-deb.sh | 160 ++ teuthology/task/buildpackages/make-rpm.sh | 294 +++ .../buildpackages/opensuse-15.0-user-data.txt | 16 + .../buildpackages/opensuse-42.1-user-data.txt | 13 + .../buildpackages/opensuse-42.2-user-data.txt | 14 + .../buildpackages/opensuse-42.3-user-data.txt | 1 + .../task/buildpackages/sle-12.1-user-data.txt | 14 + .../task/buildpackages/sle-12.2-user-data.txt | 1 + .../task/buildpackages/sle-12.3-user-data.txt | 1 + .../task/buildpackages/sle-15.0-user-data.txt | 14 + .../buildpackages/ubuntu-12.04-user-data.txt | 1 + .../buildpackages/ubuntu-14.04-user-data.txt | 1 + .../buildpackages/ubuntu-16.04-user-data.txt | 1 + teuthology/task/buildpackages/user-data.txt | 10 + teuthology/task/ceph_ansible.py | 500 +++++ teuthology/task/cephmetrics.py | 95 + teuthology/task/clock.py | 122 ++ teuthology/task/common_fs_utils.py | 123 ++ teuthology/task/console_log.py | 112 ++ teuthology/task/dump_ctx.py | 19 + teuthology/task/exec.py | 74 + teuthology/task/full_sequential.py | 39 + teuthology/task/full_sequential_finally.py | 54 + teuthology/task/hadoop.py | 424 +++++ teuthology/task/install/__init__.py | 619 ++++++ teuthology/task/install/adjust-ulimits | 16 + teuthology/task/install/daemon-helper | 114 ++ teuthology/task/install/deb.py | 226 +++ teuthology/task/install/packages.yaml | 37 + teuthology/task/install/redhat.py | 217 +++ teuthology/task/install/rpm.py | 432 +++++ teuthology/task/install/util.py | 153 ++ teuthology/task/interactive.py | 40 + teuthology/task/internal/__init__.py | 527 +++++ teuthology/task/internal/check_lock.py | 35 + teuthology/task/internal/edit_sudoers.sh | 10 + teuthology/task/internal/git_ignore_ssl.py | 22 + teuthology/task/internal/lock_machines.py | 36 + teuthology/task/internal/redhat.py | 267 +++ teuthology/task/internal/syslog.py | 196 ++ teuthology/task/internal/vm_setup.py | 51 + teuthology/task/iscsi.py | 214 +++ teuthology/task/kernel.py | 1341 +++++++++++++ teuthology/task/knfsd.py | 169 ++ teuthology/task/localdir.py | 69 + teuthology/task/lockfile.py | 241 +++ teuthology/task/loop.py | 45 + teuthology/task/mpi.py | 137 ++ teuthology/task/nfs.py | 146 ++ teuthology/task/nop.py | 13 + teuthology/task/parallel.py | 71 + teuthology/task/parallel_example.py | 58 + teuthology/task/pcp.j2 | 15 + teuthology/task/pcp.py | 335 ++++ teuthology/task/pexec.py | 149 ++ teuthology/task/print.py | 25 + teuthology/task/proc_thrasher.py | 80 + teuthology/task/selinux.py | 217 +++ teuthology/task/sequential.py | 58 + 
teuthology/task/sleep.py | 32 + teuthology/task/ssh_keys.py | 207 ++ teuthology/task/tasktest.py | 50 + teuthology/task/tests/__init__.py | 107 ++ teuthology/task/tests/test_locking.py | 25 + teuthology/task/tests/test_run.py | 40 + teuthology/task/timer.py | 46 + .../email-sleep-before-teardown.jinja2 | 10 + .../rocketchat-sleep-before-teardown.jinja2 | 6 + teuthology/test/__init__.py | 9 + teuthology/test/fake_archive.py | 107 ++ teuthology/test/fake_fs.py | 90 + teuthology/test/integration/__init__.py | 0 teuthology/test/integration/test_suite.py | 86 + teuthology/test/task/__init__.py | 205 ++ teuthology/test/task/test_ansible.py | 625 ++++++ teuthology/test/task/test_ceph_ansible.py | 177 ++ teuthology/test/task/test_console_log.py | 88 + teuthology/test/task/test_install.py | 337 ++++ teuthology/test/task/test_internal.py | 57 + teuthology/test/task/test_kernel.py | 243 +++ teuthology/test/task/test_pcp.py | 379 ++++ teuthology/test/task/test_selinux.py | 35 + teuthology/test/test_config.py | 189 ++ teuthology/test/test_contextutil.py | 68 + teuthology/test/test_describe_tests.py | 317 +++ .../test/test_email_sleep_before_teardown.py | 81 + teuthology/test/test_exit.py | 99 + teuthology/test/test_get_distro.py | 47 + teuthology/test/test_get_distro_version.py | 47 + .../test/test_get_multi_machine_types.py | 27 + teuthology/test/test_job_status.py | 60 + teuthology/test/test_ls.py | 48 + teuthology/test/test_misc.py | 388 ++++ teuthology/test/test_nuke.py | 293 +++ teuthology/test/test_packaging.py | 794 ++++++++ teuthology/test/test_parallel.py | 28 + teuthology/test/test_repo_utils.py | 242 +++ teuthology/test/test_report.py | 77 + teuthology/test/test_results.py | 155 ++ teuthology/test/test_run.py | 247 +++ teuthology/test/test_safepath.py | 55 + teuthology/test/test_schedule.py | 45 + teuthology/test/test_scrape.py | 167 ++ teuthology/test/test_timer.py | 80 + .../test_vps_os_vers_parameter_checking.py | 84 + teuthology/test/test_worker.py | 307 +++ teuthology/timer.py | 114 ++ teuthology/util/__init__.py | 0 teuthology/util/compat.py | 16 + teuthology/util/flock.py | 22 + teuthology/worker.py | 357 ++++ tox.ini | 61 + update-requirements.sh | 3 + watch-suite.sh | 4 + 399 files changed, 51664 insertions(+) create mode 100644 .coveragerc create mode 100644 .dockerignore create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/dependencies.yml create mode 100644 .github/workflows/integration.yml create mode 100644 .gitignore create mode 100644 .gitlab-ci.yml create mode 100644 .readthedocs.yml create mode 100644 .travis.yml create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 beanstalk/alpine/Dockerfile create mode 100755 bootstrap create mode 100755 build_qemu_image.sh create mode 100644 docker-compose.yml create mode 100644 docs/COMPONENTS.rst create mode 100644 docs/ChangeLog.rst create mode 100644 docs/INSTALL.rst create mode 100644 docs/LAB_SETUP.rst create mode 100644 docs/Makefile create mode 100644 docs/README.rst create mode 100755 docs/_static/create_nodes.py create mode 100644 docs/_static/nginx_paddles create mode 100644 docs/_static/nginx_pulpito create mode 100644 docs/_static/nginx_test_logs create mode 100644 docs/_static/worker_start.sh create mode 100644 docs/_themes/ceph/static/font/ApexSans-Book.eot create mode 100644 docs/_themes/ceph/static/font/ApexSans-Book.svg create mode 100644 docs/_themes/ceph/static/font/ApexSans-Book.ttf create mode 100644 
docs/_themes/ceph/static/font/ApexSans-Book.woff create mode 100644 docs/_themes/ceph/static/font/ApexSans-Medium.eot create mode 100644 docs/_themes/ceph/static/font/ApexSans-Medium.svg create mode 100644 docs/_themes/ceph/static/font/ApexSans-Medium.ttf create mode 100644 docs/_themes/ceph/static/font/ApexSans-Medium.woff create mode 100644 docs/_themes/ceph/static/nature.css_t create mode 100644 docs/_themes/ceph/theme.conf create mode 100644 docs/cephlab.png create mode 100644 docs/cephlab.svg create mode 100644 docs/commands/list.rst create mode 100644 docs/commands/teuthology-describe.rst create mode 100644 docs/commands/teuthology-dispatcher.rst create mode 100644 docs/commands/teuthology-kill.rst create mode 100644 docs/commands/teuthology-lock.rst create mode 100644 docs/commands/teuthology-ls.rst create mode 100644 docs/commands/teuthology-nuke.rst create mode 100644 docs/commands/teuthology-openstack.rst create mode 100644 docs/commands/teuthology-prune-logs.rst create mode 100644 docs/commands/teuthology-queue.rst create mode 100644 docs/commands/teuthology-reimage.rst create mode 100644 docs/commands/teuthology-report.rst create mode 100644 docs/commands/teuthology-results.rst create mode 100644 docs/commands/teuthology-schedule.rst create mode 100644 docs/commands/teuthology-suite.rst create mode 100644 docs/commands/teuthology-update-inventory.rst create mode 100644 docs/commands/teuthology-updatekeys.rst create mode 100644 docs/commands/teuthology-wait.rst create mode 100644 docs/commands/teuthology-worker.rst create mode 100644 docs/commands/teuthology.rst create mode 100644 docs/conf.py create mode 100644 docs/detailed_test_config.rst create mode 100644 docs/docker-compose/README.md create mode 100755 docs/docker-compose/db/01-init.sh create mode 100644 docs/docker-compose/docker-compose.yml create mode 100755 docs/docker-compose/start.sh create mode 100644 docs/docker-compose/testnode/Dockerfile create mode 100755 docs/docker-compose/testnode/testnode_start.sh create mode 100755 docs/docker-compose/testnode/testnode_stop.sh create mode 100644 docs/docker-compose/testnode/testnode_sudoers create mode 100644 docs/docker-compose/teuthology/.teuthology.yaml create mode 100644 docs/docker-compose/teuthology/Dockerfile create mode 100644 docs/docker-compose/teuthology/containerized_node.yaml create mode 100755 docs/docker-compose/teuthology/teuthology.sh create mode 100644 docs/downburst_vms.rst create mode 100644 docs/fragment_merging.rst create mode 100644 docs/index.rst create mode 100644 docs/intro_testers.rst create mode 100644 docs/laptop/README.md create mode 100644 docs/laptop/default-pool.xml create mode 100644 docs/laptop/front.xml create mode 100644 docs/laptop/hosts create mode 100644 docs/laptop/ssh_config create mode 100644 docs/laptop/targets.sql create mode 100644 docs/laptop/teuthology.yaml create mode 100644 docs/libcloud_backend.rst create mode 100644 docs/openstack_backend.rst create mode 100644 docs/requirements.txt create mode 100644 docs/siteconfig.rst create mode 100644 examples/3node_ceph.yaml create mode 100644 examples/3node_rgw.yaml create mode 100644 examples/parallel_example.yaml create mode 100755 hammer.sh create mode 100755 openstack-delegate.sh create mode 100644 pyproject.toml create mode 100644 pytest.ini create mode 100644 requirements.txt create mode 100644 roles/3-simple.yaml create mode 100644 roles/overrides.yaml create mode 100644 scripts/__init__.py create mode 100644 scripts/describe.py create mode 100644 scripts/dispatcher.py create 
mode 100644 scripts/kill.py create mode 100644 scripts/lock.py create mode 100644 scripts/ls.py create mode 100644 scripts/nuke.py create mode 100644 scripts/openstack.py create mode 100644 scripts/prune_logs.py create mode 100644 scripts/queue.py create mode 100644 scripts/reimage.py create mode 100644 scripts/report.py create mode 100644 scripts/results.py create mode 100644 scripts/run.py create mode 100644 scripts/schedule.py create mode 100644 scripts/suite.py create mode 100644 scripts/test/script.py create mode 100644 scripts/test/test_lock.py create mode 100644 scripts/test/test_ls.py create mode 100644 scripts/test/test_nuke.py create mode 100644 scripts/test/test_prune_logs.py create mode 100644 scripts/test/test_report.py create mode 100644 scripts/test/test_results.py create mode 100644 scripts/test/test_run.py create mode 100644 scripts/test/test_schedule.py create mode 100644 scripts/test/test_suite.py create mode 100644 scripts/test/test_updatekeys.py create mode 100644 scripts/test/test_worker.py create mode 100644 scripts/update_inventory.py create mode 100644 scripts/updatekeys.py create mode 100644 scripts/wait.py create mode 100644 scripts/worker.py create mode 100644 setup.cfg create mode 100644 teuthology/__init__.py create mode 100644 teuthology/beanstalk.py create mode 100644 teuthology/ceph.conf.template create mode 100644 teuthology/config.py create mode 100644 teuthology/contextutil.py create mode 100644 teuthology/describe_tests.py create mode 100644 teuthology/dispatcher/__init__.py create mode 100644 teuthology/dispatcher/supervisor.py create mode 100644 teuthology/dispatcher/test/test_reimage_error_mark_machine_down.py create mode 100644 teuthology/exceptions.py create mode 100644 teuthology/exit.py create mode 100644 teuthology/job_status.py create mode 100755 teuthology/kill.py create mode 100644 teuthology/lock/__init__.py create mode 100644 teuthology/lock/cli.py create mode 100644 teuthology/lock/ops.py create mode 100644 teuthology/lock/query.py create mode 100644 teuthology/lock/test/__init__.py create mode 100644 teuthology/lock/test/test_lock.py create mode 100644 teuthology/lock/util.py create mode 100644 teuthology/ls.py create mode 100644 teuthology/misc.py create mode 100644 teuthology/nuke/__init__.py create mode 100644 teuthology/nuke/actions.py create mode 100644 teuthology/openstack/__init__.py create mode 100644 teuthology/openstack/archive-key create mode 100644 teuthology/openstack/archive-key.pub create mode 100644 teuthology/openstack/bootstrap-teuthology.sh create mode 100644 teuthology/openstack/openstack-basic.yaml create mode 100644 teuthology/openstack/openstack-buildpackages.yaml create mode 100644 teuthology/openstack/openstack-centos-6.5-user-data.txt create mode 100644 teuthology/openstack/openstack-centos-7.0-user-data.txt create mode 100644 teuthology/openstack/openstack-centos-7.1-user-data.txt create mode 100644 teuthology/openstack/openstack-centos-7.2-user-data.txt create mode 120000 teuthology/openstack/openstack-centos-7.3-user-data.txt create mode 120000 teuthology/openstack/openstack-debian-7.0-user-data.txt create mode 100644 teuthology/openstack/openstack-debian-8.0-user-data.txt create mode 100644 teuthology/openstack/openstack-opensuse-15.0-user-data.txt create mode 120000 teuthology/openstack/openstack-opensuse-15.1-user-data.txt create mode 100644 teuthology/openstack/openstack-opensuse-42.1-user-data.txt create mode 100644 teuthology/openstack/openstack-opensuse-42.2-user-data.txt create mode 100644 
teuthology/openstack/openstack-opensuse-42.3-user-data.txt create mode 100644 teuthology/openstack/openstack-sle-12.1-user-data.txt create mode 100644 teuthology/openstack/openstack-sle-12.2-user-data.txt create mode 100644 teuthology/openstack/openstack-sle-12.3-user-data.txt create mode 100644 teuthology/openstack/openstack-sle-15.0-user-data.txt create mode 100644 teuthology/openstack/openstack-sle-15.1-user-data.txt create mode 100644 teuthology/openstack/openstack-teuthology.cron create mode 100755 teuthology/openstack/openstack-teuthology.init create mode 100644 teuthology/openstack/openstack-ubuntu-12.04-user-data.txt create mode 100644 teuthology/openstack/openstack-ubuntu-14.04-user-data.txt create mode 100644 teuthology/openstack/openstack-ubuntu-16.04-user-data.txt create mode 100644 teuthology/openstack/openstack-user-data.txt create mode 100755 teuthology/openstack/setup-openstack.sh create mode 100644 teuthology/openstack/test/__init__.py create mode 100644 teuthology/openstack/test/archive-on-error.yaml create mode 100644 teuthology/openstack/test/noop.yaml create mode 100644 teuthology/openstack/test/openstack-integration.py create mode 100644 teuthology/openstack/test/resources_hint.yaml create mode 100644 teuthology/openstack/test/resources_hint_no_cinder.yaml create mode 100644 teuthology/openstack/test/stop_worker.yaml create mode 100644 teuthology/openstack/test/suites/noop/+ create mode 100644 teuthology/openstack/test/suites/noop/noop.yaml create mode 100644 teuthology/openstack/test/suites/nuke/+ create mode 100644 teuthology/openstack/test/suites/nuke/nuke.yaml create mode 100644 teuthology/openstack/test/test_config.py create mode 100644 teuthology/openstack/test/test_openstack.py create mode 100644 teuthology/openstack/test/user-data-test1.txt create mode 100644 teuthology/orchestra/__init__.py create mode 100644 teuthology/orchestra/cluster.py create mode 100644 teuthology/orchestra/connection.py create mode 100644 teuthology/orchestra/console.py create mode 100644 teuthology/orchestra/daemon/__init__.py create mode 100644 teuthology/orchestra/daemon/cephadmunit.py create mode 100644 teuthology/orchestra/daemon/group.py create mode 100644 teuthology/orchestra/daemon/state.py create mode 100644 teuthology/orchestra/daemon/systemd.py create mode 100644 teuthology/orchestra/monkey.py create mode 100644 teuthology/orchestra/opsys.py create mode 100644 teuthology/orchestra/remote.py create mode 100644 teuthology/orchestra/run.py create mode 100644 teuthology/orchestra/test/__init__.py create mode 100644 teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output create mode 100644 teuthology/orchestra/test/integration/__init__.py create mode 100644 teuthology/orchestra/test/integration/test_integration.py create mode 100644 teuthology/orchestra/test/log_files/test_scan_gtest.log create mode 100644 teuthology/orchestra/test/log_files/test_scan_nose.log create mode 100644 teuthology/orchestra/test/test_cluster.py create mode 100644 teuthology/orchestra/test/test_connection.py create mode 100644 teuthology/orchestra/test/test_console.py create mode 100644 teuthology/orchestra/test/test_opsys.py create mode 100644 teuthology/orchestra/test/test_remote.py create mode 100644 teuthology/orchestra/test/test_run.py create mode 100644 teuthology/orchestra/test/test_systemd.py create mode 100644 teuthology/orchestra/test/util.py create mode 100644 teuthology/packaging.py create mode 100644 teuthology/parallel.py create mode 100644 teuthology/provision/__init__.py 
create mode 100644 teuthology/provision/cloud/__init__.py create mode 100644 teuthology/provision/cloud/base.py create mode 100644 teuthology/provision/cloud/openstack.py create mode 100644 teuthology/provision/cloud/test/test_base.py create mode 100644 teuthology/provision/cloud/test/test_cloud_init.py create mode 100644 teuthology/provision/cloud/test/test_cloud_util.py create mode 100644 teuthology/provision/cloud/test/test_openstack.py create mode 100644 teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml create mode 100644 teuthology/provision/cloud/util.py create mode 100644 teuthology/provision/downburst.py create mode 100644 teuthology/provision/fog.py create mode 100644 teuthology/provision/openstack.py create mode 100644 teuthology/provision/pelagos.py create mode 100644 teuthology/provision/test/test_downburst.py create mode 100644 teuthology/provision/test/test_fog.py create mode 100644 teuthology/provision/test/test_init_provision.py create mode 100644 teuthology/provision/test/test_pelagos.py create mode 100644 teuthology/prune.py create mode 100644 teuthology/reimage.py create mode 100644 teuthology/repo_utils.py create mode 100644 teuthology/report.py create mode 100644 teuthology/results.py create mode 100644 teuthology/run.py create mode 100644 teuthology/run_tasks.py create mode 100644 teuthology/safepath.py create mode 100644 teuthology/schedule.py create mode 100644 teuthology/scrape.py create mode 100644 teuthology/suite/__init__.py create mode 100644 teuthology/suite/build_matrix.py create mode 100644 teuthology/suite/fragment-merge.lua create mode 100644 teuthology/suite/matrix.py create mode 100644 teuthology/suite/merge.py create mode 100644 teuthology/suite/placeholder.py create mode 100644 teuthology/suite/run.py create mode 100644 teuthology/suite/test/suites/noop/noop.yaml create mode 100644 teuthology/suite/test/test_build_matrix.py create mode 100644 teuthology/suite/test/test_init.py create mode 100644 teuthology/suite/test/test_matrix.py create mode 100644 teuthology/suite/test/test_merge.py create mode 100644 teuthology/suite/test/test_placeholder.py create mode 100644 teuthology/suite/test/test_run_.py create mode 100644 teuthology/suite/test/test_util.py create mode 100644 teuthology/suite/util.py create mode 100644 teuthology/task/__init__.py create mode 100644 teuthology/task/ansible.py create mode 100644 teuthology/task/args.py create mode 100644 teuthology/task/background_exec.py create mode 100644 teuthology/task/buildpackages.py create mode 100644 teuthology/task/buildpackages/Makefile create mode 100644 teuthology/task/buildpackages/centos-6.5-user-data.txt create mode 120000 teuthology/task/buildpackages/centos-7.0-user-data.txt create mode 120000 teuthology/task/buildpackages/centos-7.1-user-data.txt create mode 120000 teuthology/task/buildpackages/centos-7.2-user-data.txt create mode 120000 teuthology/task/buildpackages/centos-7.3-user-data.txt create mode 100644 teuthology/task/buildpackages/common.sh create mode 100644 teuthology/task/buildpackages/debian-8.0-user-data.txt create mode 100755 teuthology/task/buildpackages/make-deb.sh create mode 100755 teuthology/task/buildpackages/make-rpm.sh create mode 100644 teuthology/task/buildpackages/opensuse-15.0-user-data.txt create mode 100644 teuthology/task/buildpackages/opensuse-42.1-user-data.txt create mode 100644 teuthology/task/buildpackages/opensuse-42.2-user-data.txt create mode 120000 teuthology/task/buildpackages/opensuse-42.3-user-data.txt create mode 100644 
teuthology/task/buildpackages/sle-12.1-user-data.txt create mode 120000 teuthology/task/buildpackages/sle-12.2-user-data.txt create mode 120000 teuthology/task/buildpackages/sle-12.3-user-data.txt create mode 100644 teuthology/task/buildpackages/sle-15.0-user-data.txt create mode 120000 teuthology/task/buildpackages/ubuntu-12.04-user-data.txt create mode 120000 teuthology/task/buildpackages/ubuntu-14.04-user-data.txt create mode 120000 teuthology/task/buildpackages/ubuntu-16.04-user-data.txt create mode 100644 teuthology/task/buildpackages/user-data.txt create mode 100644 teuthology/task/ceph_ansible.py create mode 100644 teuthology/task/cephmetrics.py create mode 100644 teuthology/task/clock.py create mode 100644 teuthology/task/common_fs_utils.py create mode 100644 teuthology/task/console_log.py create mode 100644 teuthology/task/dump_ctx.py create mode 100644 teuthology/task/exec.py create mode 100644 teuthology/task/full_sequential.py create mode 100644 teuthology/task/full_sequential_finally.py create mode 100644 teuthology/task/hadoop.py create mode 100644 teuthology/task/install/__init__.py create mode 100755 teuthology/task/install/adjust-ulimits create mode 100755 teuthology/task/install/daemon-helper create mode 100644 teuthology/task/install/deb.py create mode 100644 teuthology/task/install/packages.yaml create mode 100644 teuthology/task/install/redhat.py create mode 100644 teuthology/task/install/rpm.py create mode 100644 teuthology/task/install/util.py create mode 100644 teuthology/task/interactive.py create mode 100644 teuthology/task/internal/__init__.py create mode 100644 teuthology/task/internal/check_lock.py create mode 100755 teuthology/task/internal/edit_sudoers.sh create mode 100644 teuthology/task/internal/git_ignore_ssl.py create mode 100644 teuthology/task/internal/lock_machines.py create mode 100644 teuthology/task/internal/redhat.py create mode 100644 teuthology/task/internal/syslog.py create mode 100644 teuthology/task/internal/vm_setup.py create mode 100644 teuthology/task/iscsi.py create mode 100644 teuthology/task/kernel.py create mode 100644 teuthology/task/knfsd.py create mode 100644 teuthology/task/localdir.py create mode 100644 teuthology/task/lockfile.py create mode 100644 teuthology/task/loop.py create mode 100644 teuthology/task/mpi.py create mode 100644 teuthology/task/nfs.py create mode 100644 teuthology/task/nop.py create mode 100644 teuthology/task/parallel.py create mode 100644 teuthology/task/parallel_example.py create mode 100644 teuthology/task/pcp.j2 create mode 100644 teuthology/task/pcp.py create mode 100644 teuthology/task/pexec.py create mode 100644 teuthology/task/print.py create mode 100644 teuthology/task/proc_thrasher.py create mode 100644 teuthology/task/selinux.py create mode 100644 teuthology/task/sequential.py create mode 100644 teuthology/task/sleep.py create mode 100644 teuthology/task/ssh_keys.py create mode 100644 teuthology/task/tasktest.py create mode 100644 teuthology/task/tests/__init__.py create mode 100644 teuthology/task/tests/test_locking.py create mode 100644 teuthology/task/tests/test_run.py create mode 100644 teuthology/task/timer.py create mode 100644 teuthology/templates/email-sleep-before-teardown.jinja2 create mode 100644 teuthology/templates/rocketchat-sleep-before-teardown.jinja2 create mode 100644 teuthology/test/__init__.py create mode 100644 teuthology/test/fake_archive.py create mode 100644 teuthology/test/fake_fs.py create mode 100644 teuthology/test/integration/__init__.py create mode 100644 
teuthology/test/integration/test_suite.py create mode 100644 teuthology/test/task/__init__.py create mode 100644 teuthology/test/task/test_ansible.py create mode 100644 teuthology/test/task/test_ceph_ansible.py create mode 100644 teuthology/test/task/test_console_log.py create mode 100644 teuthology/test/task/test_install.py create mode 100644 teuthology/test/task/test_internal.py create mode 100644 teuthology/test/task/test_kernel.py create mode 100644 teuthology/test/task/test_pcp.py create mode 100644 teuthology/test/task/test_selinux.py create mode 100644 teuthology/test/test_config.py create mode 100644 teuthology/test/test_contextutil.py create mode 100644 teuthology/test/test_describe_tests.py create mode 100644 teuthology/test/test_email_sleep_before_teardown.py create mode 100644 teuthology/test/test_exit.py create mode 100644 teuthology/test/test_get_distro.py create mode 100644 teuthology/test/test_get_distro_version.py create mode 100644 teuthology/test/test_get_multi_machine_types.py create mode 100644 teuthology/test/test_job_status.py create mode 100644 teuthology/test/test_ls.py create mode 100644 teuthology/test/test_misc.py create mode 100644 teuthology/test/test_nuke.py create mode 100644 teuthology/test/test_packaging.py create mode 100644 teuthology/test/test_parallel.py create mode 100644 teuthology/test/test_repo_utils.py create mode 100644 teuthology/test/test_report.py create mode 100644 teuthology/test/test_results.py create mode 100644 teuthology/test/test_run.py create mode 100644 teuthology/test/test_safepath.py create mode 100644 teuthology/test/test_schedule.py create mode 100644 teuthology/test/test_scrape.py create mode 100644 teuthology/test/test_timer.py create mode 100644 teuthology/test/test_vps_os_vers_parameter_checking.py create mode 100644 teuthology/test/test_worker.py create mode 100644 teuthology/timer.py create mode 100644 teuthology/util/__init__.py create mode 100644 teuthology/util/compat.py create mode 100644 teuthology/util/flock.py create mode 100644 teuthology/worker.py create mode 100644 tox.ini create mode 100755 update-requirements.sh create mode 100755 watch-suite.sh diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..560e800171 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +omit = */test/* diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..d2f4d315d7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +venv +virtualenv +.tox diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..1875598fde --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,30 @@ +name: CI + +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + test: + name: CI on python${{ matrix.python }} via ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.6, 3.9] + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install tox + run: pip install tox + - name: Run flake8 + run: tox -e flake8 + - name: Run unit tests + run: tox -e py3 + - name: Run docs build + run: tox -e docs diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml new file mode 100644 index 0000000000..9417e9f668 --- /dev/null +++ b/.github/workflows/dependencies.yml @@ -0,0 +1,47 @@ +name: dependencies + +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + upgrade: + name: Test 
dependencies + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.6, 3.8] + steps: + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python }} + - name: Checkout default branch + uses: actions/checkout@v3 + with: + ref: main + path: teuthology + - name: virtualenv + run: | + pip install --user virtualenv + virtualenv ./virtualenv + cd ./virtualenv/lib/python* + touch no-global-site-packages.txt + working-directory: ./teuthology + - name: Initial bootstrap + run: ./bootstrap install + working-directory: ./teuthology + - name: Move initial repository + run: mv teuthology teuthology.orig + - name: Checkout desired ref + uses: actions/checkout@v3 + with: + path: teuthology + - name: Move virtualenv to new checkout + run: mv ./teuthology.orig/virtualenv ./teuthology/ + - name: Re-run bootstrap + run: ./bootstrap install + working-directory: ./teuthology diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml new file mode 100644 index 0000000000..162d510323 --- /dev/null +++ b/.github/workflows/integration.yml @@ -0,0 +1,12 @@ +name: integration +on: + pull_request: + workflow_dispatch: +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Test using docker-compose + run: ./start.sh + working-directory: ./docs/docker-compose diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..19ded7805e --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +*~ +.#* +## the next line needs to start with a backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.pyc +*.pyo +.tox + +/*.egg-info +/virtualenv +/build +/*.yaml +docs/build + +.ropeproject +.coverage + +# autogenerated docs from sphinx-apidoc +docs/modules.rst +docs/teuthology.rst +docs/teuthology.*.rst + +# PyCharm +.idea + +# vscode +.vscode/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000..1595391234 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,3 @@ +teuthology: + tags: [ ceph-workbench ] + script: "git clean -ffqdx ; ./bootstrap install ; unset OS_AUTH_URL ; source virtualenv/bin/activate ; pip install tox ; tox" diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..66f140c890 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,19 @@ +--- +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 +formats: [] +build: + image: latest +python: + version: 3.7 + install: + - method: pip + path: . + extra_requirements: + - orchestra + - requirements: docs/requirements.txt +sphinx: + builder: html + configuration: docs/conf.py diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..f1bc1c8816 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,17 @@ +env: HOME=/home/travis + +sudo: required +dist: trusty + +before_install: + - sudo apt-get -qq update + - ./bootstrap install + +language: python +python: + - 2.7 + +install: + - pip install tox + +script: tox -rv diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..02914d4f91 --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Red Hat, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..2683cd6549 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include *.rst +include requirements.txt +include tox.ini +include pytest.ini diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..f83be7992f --- /dev/null +++ b/README.rst @@ -0,0 +1,11 @@ +=================================================== +`Teuthology` -- The Ceph integration test framework +=================================================== + + +Welcome! Teuthology's documentation is primarily hosted at `docs.ceph.com +`__. + +You can also look at docs `inside this repository `__, but note that +GitHub's `RST `__ rendering is quite +limited. Mainly that means that links between documents will be broken. diff --git a/beanstalk/alpine/Dockerfile b/beanstalk/alpine/Dockerfile new file mode 100644 index 0000000000..7afb0005b8 --- /dev/null +++ b/beanstalk/alpine/Dockerfile @@ -0,0 +1,13 @@ +# For beanstalkd 1.12 use edge branch +#FROM alpine:edge + +FROM alpine:3.12.3 + +MAINTAINER Kyrylo Shatskyy + +RUN apk update && apk add beanstalkd beanstalkd-doc + +ENV BEANSTALK_ADDR "0.0.0.0" +ENV BEANSTALK_PORT "11300" + +CMD /usr/bin/beanstalkd -V -l $BEANSTALK_ADDR -p $BEANSTALK_PORT diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000000..42415be19b --- /dev/null +++ b/bootstrap @@ -0,0 +1,172 @@ +#!/bin/bash +set -e +if [ $# -eq 0 ]; then + install=false +else + if [ "$1" = "install" ]; then + install=true + else + echo "Invalid command, supported commands are: 'install'" + exit 1 + fi +fi + +if [[ "$PYTHON" =~ "python2" ]]; then + echo "python2 is not supported." >&2 + exit 1 +fi + +PYTHON=${PYTHON:-"python3"} +VENV=${VENV:-"virtualenv"} + +case "$(uname -s)" in +Linux) + case "$(lsb_release --id --short)" in + Ubuntu|Debian|LinuxMint) + deps=(qemu-utils python3-dev libssl-dev python3-pip python3-venv libev-dev libvirt-dev libffi-dev libyaml-dev) + for package in ${deps[@]}; do + if [ "$(dpkg --status -- $package|sed -n 's/^Status: //p')" != "install ok installed" ]; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages:" 1>&2 + echo "$missing" + if [ "$install" = true ]; then + echo "Installing missing packages..." 
+ sudo apt-get -y install $missing + else + echo "Please install missing packages or run './bootstrap install' if you have sudo" + echo "sudo apt-get -y install $missing" + exit 1 + fi + fi + ;; + RedHatEnterpriseWorkstation|RedHatEnterpriseServer|RedHatEnterprise|CentOS) + deps=(python3-pip python3-devel mariadb-devel libev-devel libvirt-devel libffi-devel) + for package in ${deps[@]}; do + if [ "$(rpm -q $package)" == "package $package is not installed" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages:" 1>&2 + echo "$missing" + if [ "$install" = true ]; then + echo "Installing missing packages..." + sudo yum -y install $missing + else + echo "Please install missing packages or run './bootstrap install' if you have sudo" + echo "sudo yum -y install $missing" + exit 1 + fi + fi + ;; + Fedora) + deps=(python3-pip python3-devel libev-devel libvirt-devel libffi-devel) + for package in ${deps[@]}; do + if [ "$(rpm -q $package)" == "package $package is not installed" ]; then + missing="${missing:+$missing }$package" + fi + done + fedora_release=$(lsb_release -rs) + package_manager=dnf + if [ $fedora_release -lt 23 ]; then + package_manager=yum + fi + if [ -n "$missing" ]; then + echo "$0: missing required packages:" 1>&2 + echo "$missing" + if [ "$install" = true ]; then + echo "Installing missing packages..." + sudo $package_manager -y install $missing + else + echo "Please install missing packages or run './bootstrap install' if you have sudo" + echo "sudo $package_manager -y install $missing" + exit 1 + fi + fi + ;; + "openSUSE project"|"SUSE LINUX"|"openSUSE") + deps=(python3-pip python3-devel python3 libev-devel libvirt-devel libffi-devel) + for package in ${deps[@]}; do + if [ "$(rpm -q $package)" == "package $package is not installed" ]; then + if [ "$(rpm -q --whatprovides $package)" == "no package provides $package" ]; then + missing="${missing:+$missing }$package" + fi + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo "sudo zypper install $missing" + exit 1 + fi + ;; + *) + echo "This script does not support your Linux distribution yet. Patches encouraged!" + exit 1 + ;; + esac + ;; +Darwin) + if ! brew --version &>/dev/null; then + echo "You need Homebrew: http://brew.sh/" + exit 1 + fi + for keg in python libvirt libev libffi; do + if brew list $keg >/dev/null 2>&1; then + echo "Found $keg" + else + if [ "$install" = true ]; then + brew install $keg + else + missing="${missing:+$missing }$keg" + echo "Please install missing packages or run './bootstrap install':" + echo "brew install $missing" + exit 1 + fi + fi + done + ;; +*) + echo "This script does not support your OS yet. Patches encouraged!" + exit 1 + ;; +esac + +# If the venv was set to use system site-packages, fix that +if [ -f "$VENV/pyvenv.cfg" ]; then + sed -i'' -e 's/\(include-system-site-packages\s*=\s*\)true/\1false/g' $VENV/pyvenv.cfg +fi + +export LC_ALL=en_US.UTF-8 + +if [ -z "$NO_CLOBBER" ] && [ ! -e "./$VENV/bin/pip" ]; then + rm -rf virtualenv +fi + +if [ -z "$NO_CLOBBER" ] || [ ! -e ./$VENV ]; then + python3 -m venv $VENV +fi + +./$VENV/bin/pip install packaging + +# It is impossible to upgrade ansible from 2.9 to 2.10 via pip. 
+# See https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.10.html#known-issues +if [ -f "$VENV/bin/ansible" ]; then + ansible_version=$($VENV/bin/pip list --format json | python3 -c "import sys; import json; print(list(filter(lambda i: i['name'] == 'ansible', json.loads(sys.stdin.read())))[0]['version'])") + uninstall_ansible=$(./$VENV/bin/python3 -c "from packaging.version import parse; print(parse('$ansible_version') < parse('2.10.0'))") + if [ "$uninstall_ansible" = "True" ]; then + ./$VENV/bin/pip uninstall -y ansible + fi +fi + +# First, upgrade pip +./$VENV/bin/pip install --upgrade pip + +# By default, install teuthology in editable mode +./$VENV/bin/pip install ${PIP_INSTALL_FLAGS:---editable '.[test]'} + +# Check to make sure requirements are met +./$VENV/bin/pip check diff --git a/build_qemu_image.sh b/build_qemu_image.sh new file mode 100755 index 0000000000..614f519aaf --- /dev/null +++ b/build_qemu_image.sh @@ -0,0 +1,61 @@ +#!/bin/sh -x +set -e + +IMAGE_URL=http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img + +wget -O base.qcow2 $IMAGE_URL + +image=base.raw +qemu-img convert -O raw base.qcow2 $image +rm -f base.qcow2 + +# Note: this assumes that sector size is 512, and that there's only one +# partition. very brittle. +START_SECT=$(fdisk -lu $image | grep ^$image | awk '{print $3}') +START_BYTE=$(echo "$START_SECT * 512" | bc) + +root=/tmp/$$ + +cleanup() { + sudo chroot $root rm -f /etc/resolv.conf || true + sudo chroot $root ln -s ../run/resolvconf/resolv.conf /etc/resolv.conf || true + sudo umount $root/proc || true + sudo umount $root/sys || true + sudo umount $root/dev/pts || true + sudo umount $root + sudo rmdir $root +} +trap cleanup INT TERM EXIT + +sudo mkdir $root +sudo mount -o loop,offset=$START_BYTE $image $root + +# set up chroot +sudo mount -t proc proc $root/proc +sudo mount -t sysfs sysfs $root/sys +sudo mount -t devpts devptr $root/dev/pts + +# set up network access +sudo chroot $root rm /etc/resolv.conf +sudo cp /etc/resolv.conf $root/etc/resolv.conf + +# packages +# These should be kept in sync with ceph-qa-chef.git/cookbooks/ceph-qa/default.rb +sudo chroot $root apt-get -y --force-yes install iozone3 bonnie++ dbench \ + tiobench build-essential attr libtool automake gettext uuid-dev \ + libacl1-dev bc xfsdump dmapi xfslibs-dev + +# install ltp without ltp-network-test, so we don't pull in xinetd and +# a bunch of other unnecessary stuff +sudo chroot $root apt-get -y --force-yes --no-install-recommends install ltp-kernel-test + +# add 9p fs support +sudo chroot $root apt-get -y --force-yes install linux-image-extra-virtual + +cleanup +trap - INT TERM EXIT + +qemu-img convert -O qcow2 $image output.qcow2 +rm -f $image + +exit 0 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..ca57986cb2 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,81 @@ +# +# How to use this file +# ==================== +# +# Before moving forward the docker must be installed and your user +# must be in docker group, for example for openSUSE do the following: +# +# sudo zypper in docker +# sudo usermod -a -G docker $USER +# +# Note: to activate user group relogin might be required. +# +# Create a directory where the development setup be located. +# Go to the directory and checkout pulpito, paddles, and teuthology. 
+ # + # git clone https://github.com/ceph/paddles + # git clone https://github.com/ceph/pulpito + # git clone https://github.com/ceph/teuthology + # + # Note: use your own repos or branches. + # + # Go to teuthology, bootstrap the virtual environment, and install docker-compose. + # + # cd teuthology + # ./bootstrap + # . virtualenv/bin/activate + # pip install docker-compose + # + # Make sure docker is running, then build images for postgres, paddles, pulpito and beanstalk. + # + # sudo systemctl start docker + # docker-compose build + # + # Run the services: + # + # docker-compose up + # + # Todo: add worker and/or dispatcher service + +version: "3.7" + +services: + postgres: + image: postgres + ports: + - "5432:5432" + environment: + POSTGRES_PASSWORD: secret + POSTGRES_USER: paddles + POSTGRES_DB: paddles + PGDATA: /var/lib/postgresql/data/pgdata + volumes: + - $HOME/.teuthology/postgres:/var/lib/postgresql/data + healthcheck: + test: | + psql -h localhost -U paddles -c 'SELECT 1=1' paddles + timeout: 60s + interval: 5s + start_period: 10s + paddles: + build: ../paddles + environment: + PADDLES_SERVER_HOST: 0.0.0.0 + PADDLES_SQLALCHEMY_URL: postgresql+psycopg2://paddles:secret@postgres/paddles + ports: + - "8180:8080" + depends_on: + postgres: + condition: service_healthy + pulpito: + build: ../pulpito + environment: + PULPITO_PADDLES_ADDRESS: http://paddles:8080 + ports: + - "8081:8081" + depends_on: + - paddles + beanstalk: + build: beanstalk/alpine + ports: + - "11300:11300"
diff --git a/docs/COMPONENTS.rst b/docs/COMPONENTS.rst new file mode 100644 index 0000000000..7c3dd03b6b --- /dev/null +++ b/docs/COMPONENTS.rst @@ -0,0 +1,70 @@ +=================== +Ceph Lab Components +=================== + +The distinct hardware/software components that a lab is composed of, and the way +they interact with each other, are described here. In general, a lab is composed of +a scheduler, worker(s), package builder (`gitbuilder +`__), job database (`paddles +`__), job archive, a web UI (`pulpito +`__) and test nodes. + +| +| + +.. image:: cephlab.png + :align: center + :alt: Components of a Ceph Lab. + +| +| + +In the figure above, every service appears on a separate machine but this is +not a requirement (see :ref:`lab_setup` for an alternative setup). Jobs are +submitted to the scheduler, then picked up by the dispatcher and +processed by job supervisors. One supervisor processes and keeps track of a +job (one at a time). The output of the job (logs and files associated with an +execution) is stored in the archive, which is a path in the file system where +the dispatcher is running. The job database contains information about the status +of jobs and test nodes, as well as results of executions (# of tests passed, +failed, etc.). All this information can be visualized in ``pulpito``, the web +UI. For an example, see the Ceph community's lab `here `__. + +Test nodes can be grouped into classes (referred to as ``machine-type``), +allowing teuthology to schedule jobs across multiple hardware setups. + +Life of a Teuthology Job +======================== + +The teuthology scheduler exposes a work queue (using `beanstalkd +`__) where jobs are submitted. The life of a +job begins when ``teuthology-suite`` is executed, which is when a job is +prepared and queued (``teuthology-schedule`` is implicitly invoked). When a job +is created (or whenever the status of a job is changed, e.g. from queued to +started), information about the job is recorded in ``paddles``'s internal +database.
Depending on the priority of the job, the scheduler eventually +determines when a job can get executed. At this point, +``teuthology-dispatcher`` checks the lock status of the requested +machines by querying ``paddles``, acquires locks on the +nodes if they are available, and invokes ``teuthology-dispatcher`` in +``supervisor`` mode. ``supervisor`` reimages the target machines and invokes +``teuthology`` (the command). ``teuthology`` proceeds to execute the job +(executing every task in the YAML job description). After the execution is +completed (i.e. the ``teuthology`` process exits), ``supervisor`` unlocks or nukes +the target machines depending on the status of the job. If the requested +machines are not available, the ``dispatcher`` waits for the machines to be +available before running any more jobs. Results from the job are stored in the +archive directory of the worker for forensic analysis. +
+Since `QA suites `__ usually +specify ``install`` and ``ceph`` tasks, we briefly describe what they do. When +a suite is scheduled (via ``teuthology-suite``), the branch that is being +worked against has to be specified (e.g. a git ``SHA`` or ``ref``). Packages +for the given branch and distro are probed on gitbuilder to see if they exist. +Once this and other sanity checks pass, the job is created and scheduled. Once +the job initializes, the ``install`` task pulls and installs Ceph packages from +``gitbuilder``. The installation task might also be preceded by a ``kernel`` +task, which first reboots testnodes into a specified kernel (optionally +installing it first). The ``ceph`` task subsequently configures and launches the cluster. At +this point, Ceph is ready to receive requests from other tasks (such as +``rados``).
diff --git a/docs/ChangeLog.rst b/docs/ChangeLog.rst new file mode 100644 index 0000000000..218a0baa2e --- /dev/null +++ b/docs/ChangeLog.rst @@ -0,0 +1,6 @@ +Changelog +========= + +0.1.0 +----- +* (Actual changelog coming soon)
diff --git a/docs/INSTALL.rst b/docs/INSTALL.rst new file mode 100644 index 0000000000..28a96c9cd1 --- /dev/null +++ b/docs/INSTALL.rst @@ -0,0 +1,119 @@ +.. _installation_and_setup: + +Installation and setup +====================== + +Ubuntu, Fedora & SUSE/openSUSE +------------------------------ +First, clone the `git repository `__:: + + git clone https://github.com/ceph/teuthology.git + +Next, run the bootstrap script, which will do everything for you assuming +you have ``sudo``:: + + cd teuthology + ./bootstrap + +Finally, activate the ``virtualenv``:: + + source virtualenv/bin/activate + +Run a teuthology command to confirm that everything's working. For instance:: + + teuthology --help + +MacOS X +------- + +The ``bootstrap`` script was recently updated to support MacOS X using `homebrew `_:: + + ./bootstrap + +**Note**: Certain features might not work properly on MacOS X. Patches are +encouraged, but it has never been a goal of ours to run a full ``teuthology`` +setup on a Mac. + +Other operating systems +----------------------- + +Patches are welcome to add ``bootstrap`` support for other operating systems. Until then, manual installs are possible. + +First install the non-PyPI dependencies:: + + python-dev python-pip python-virtualenv libevent-dev python-libvirt + +Next, clone its `git repository `__, +create a `virtualenv `__, and +install dependencies.
+The instructions are given below::
+
+    git clone https://github.com/ceph/teuthology/
+    cd teuthology
+    virtualenv --python python3 ./virtualenv
+    source virtualenv/bin/activate
+    pip install --upgrade pip
+    pip install -r requirements.txt
+    python setup.py develop
+
+
+Teuthology in PyPI
+------------------
+
+If you prefer, you may instead install ``teuthology`` from `PyPI `__::
+
+    pip install teuthology
+
+
+**Note**: The version in PyPI can be (*far*) behind the development version.
+
+Or from GitHub::
+
+    pip install git+https://github.com/ceph/teuthology#egg=teuthology[orchestra]
+
+where the dependencies for orchestration are installed as well. They are used
+for interacting with the services to schedule tests and to report the test
+results.
+
+
+Update Dependencies
+-------------------
+
+We track the dependencies using ``requirements.txt``. These packages are
+tested, and should work with teuthology. But if you want to bump up their
+versions, please use the following command to update these files::
+
+    ./update-requirements.sh -P
+
+If the command above fails like::
+
+    Traceback (most recent call last):
+      File "/home/kchai/teuthology/virtualenv/bin/pip-compile", line 5, in <module>
+        from piptools.scripts.compile import cli
+      File "/home/kchai/teuthology/virtualenv/local/lib/python2.7/site-packages/piptools/scripts/compile.py", line 11, in <module>
+        from pip.req import InstallRequirement, parse_requirements
+    ImportError: No module named req
+
+please upgrade pip-tools using the following command and try again::
+
+    pip install pip-tools --upgrade
+
+Add Dependencies
+----------------
+
+tl;dr: please add the new dependencies in both ``setup.py`` and
+``requirements.in``.
+
+We also use ``pip install`` to install teuthology in some of Ceph's unit
+tests. To cater to their needs, some requirements are listed in ``setup.py``
+as well, so that ``pip install`` can pick them up. We could avoid duplicating
+the package specifications in two places by putting::
+
+    -e .[orchestra,test]
+
+in ``requirements.in``. But dependabot includes::
+
+    -e file:///home/dependabot/dependabot-updater/tmp/dependabot_20200617-72-1n8af4b # via -r requirements.in
+
+in the generated ``requirements.txt``. This renders the created pull request
+useless without human intervention. To appease dependabot, a full-blown
+``requirements.in`` collecting all direct dependencies listed by ``setup.py``
+is used instead.
diff --git a/docs/LAB_SETUP.rst b/docs/LAB_SETUP.rst
new file mode 100644
index 0000000000..b2fbbe0534
--- /dev/null
+++ b/docs/LAB_SETUP.rst
@@ -0,0 +1,142 @@
+.. _lab_setup:
+
+==========================
+Teuthology Lab Setup Notes
+==========================
+
+Introduction
+============
+
+We recently set up a new lab for Ceph testing and decided to document the
+parts of the process that are most relevant to teuthology. This is the result.
+
+We started by setting aside two of the test machines: one as the 'teuthology
+node', and another as the 'paddles/pulpito node'. These would be used to
+orchestrate automated testing and to store and serve the results on our
+intranet.
+
+paddles/pulpito node
+====================
+
+We're currently running both paddles and pulpito on the same node. We have a
+proxy server up front listening on port 80 that forwards to the proper service
+based on which hostname is used. Feel free to modify our
+`paddles <_static/nginx_paddles>`_ and `pulpito <_static/nginx_pulpito>`_
+configurations for your use.
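+
+Once both services are running (see "Starting up" below), a quick sanity
+check of the proxy is possible with a few lines of Python. This is purely
+illustrative and assumes the example hostnames above plus the ``requests``
+library::
+
+    import requests
+
+    # Both requests go to port 80; the proxy decides, based on the hostname,
+    # whether paddles or pulpito answers. Expect HTTP 200 from both.
+    for url in ('http://paddles.example.com/', 'http://pulpito.example.com/'):
+        print(url, requests.get(url, timeout=30).status_code)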
+
+Do the following as root or as another user with sudo access::
+
+    sudo apt-get install git python-dev python-virtualenv postgresql postgresql-contrib postgresql-server-dev-all supervisor
+    sudo -u postgres createuser paddles -P
+    sudo -u postgres createdb paddles
+
+Create separate users for paddles and pulpito. We used 'paddles' and 'pulpito'.
+
+
+paddles
+-------
+Follow instructions at https://github.com/ceph/paddles/blob/main/README.rst
+
+
+pulpito
+-------
+Follow instructions at https://github.com/ceph/pulpito/blob/main/README.rst
+
+
+Starting up
+-----------
+
+Back as the 'root or sudo' user::
+
+    sudo cp ~paddles/paddles/supervisord_paddles.conf /etc/supervisor/conf.d/paddles.conf
+    sudo supervisorctl reread && sudo supervisorctl update paddles && sudo supervisorctl start paddles
+    sudo cp ~pulpito/pulpito/supervisord_pulpito.conf /etc/supervisor/conf.d/pulpito.conf
+    sudo supervisorctl reread && sudo supervisorctl update pulpito && sudo supervisorctl start pulpito
+
+
+Test Nodes
+==========
+
+Each node needs to have a user named 'ubuntu' with passwordless sudo access.
+
+It's also necessary to generate an ssh key pair that will be used to provide
+passwordless authentication to all the test nodes, and put the public key in
+``~/.ssh/authorized_keys`` on all the test nodes.
+
+
+Teuthology Node
+===============
+
+Create an ``/etc/teuthology.yaml`` that looks like::
+
+    lab_domain: example.com
+    lock_server: http://paddles.example.com:8080
+    results_server: http://paddles.example.com:8080
+    queue_host: localhost
+    queue_port: 11300
+    results_email: you@example.com
+    archive_base: /home/teuthworker/archive
+
+Do the following as root or as another user with sudo access:
+
+Create two additional users: one that simply submits jobs to the queue, and
+another that picks them up from the queue and executes them. We use
+'teuthology' and 'teuthworker', respectively.
+
+Give both users passwordless sudo access.
+
+Copy the ssh key pair that you created to access the test nodes into each of
+these users' ``~/.ssh`` directory.
+
+Install these packages::
+
+    sudo apt-get -y install git python-dev python-pip python-virtualenv libevent-dev python-libvirt beanstalkd
+
+Now, set up the two users you just created:
+
+
+Scheduler
+---------
+As 'teuthology', do the following::
+
+    mkdir ~/src
+    git clone https://github.com/ceph/teuthology.git src/teuthology_main
+    pushd src/teuthology_main/
+    ./bootstrap
+    popd
+
+
+Worker
+------
+As 'teuthworker', do the following::
+
+    mkdir ~/src
+    git clone https://github.com/ceph/teuthology.git src/teuthology_main
+    pushd src/teuthology_main/
+    ./bootstrap
+    popd
+    mkdir ~/bin
+    wget -O ~/bin/worker_start https://raw.githubusercontent.com/ceph/teuthology/main/docs/_static/worker_start.sh
+    echo 'PATH="$HOME/src/teuthology_main/virtualenv/bin:$PATH"' >> ~/.profile
+    source ~/.profile
+    mkdir -p ~/archive/worker_logs
+    worker_start magna 1
+
+
+Submitting Nodes
+================
+
+First::
+
+    wget https://raw.githubusercontent.com/ceph/teuthology/main/docs/_static/create_nodes.py
+
+Edit ``create_nodes.py`` to generate the hostnames of the machines you want to
+submit to paddles.
+
+Now to do the work::
+
+    python create_nodes.py
+    teuthology-lock --owner initial@setup --list-targets > /tmp/targets
+    teuthology --owner initial@setup /tmp/targets
+    teuthology-lock --owner initial@setup --unlock -t /tmp/targets
+
+
+Serving Test Logs
+=================
+
+pulpito tries to provide links to test logs.
+Out of the box, those links will be broken, but they are easy to fix.
+
+First, install your favorite web server on the teuthology node. If you use
+nginx, you may use `our configuration <_static/nginx_test_logs>`_ as a
+template.
+
+Once you've got log files being served, edit paddles' ``config.py`` and update
+the ``job_log_href_templ`` value. Restart paddles when you're done.
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000..6fd9f99659
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,180 @@
+# Makefile for Sphinx documentation
+#
+
+GENERATED_API_DOCS = {modules,teuthology{,.openstack,.openstack.test,.orchestra,.task,.task.tests}}.rst
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+SOURCEDIR     = .
+BUILDDIR      = build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR)
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR)
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/* $(GENERATED_API_DOCS)
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished.
The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/teuthology.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/teuthology.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/teuthology" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/teuthology" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." 
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/README.rst b/docs/README.rst
new file mode 100644
index 0000000000..58b03f0c17
--- /dev/null
+++ b/docs/README.rst
@@ -0,0 +1,150 @@
+===================================================
+`Teuthology` -- The Ceph integration test framework
+===================================================
+
+``teuthology`` is an automation framework for `Ceph
+`__, written in `Python
+`__. It is used to run the vast majority of its tests
+and was developed because the unique requirements of testing such a highly
+distributed system with active kernel development meant that no other framework
+existed that could do its job.
+
+The name '`teuthology `__' refers to the
+study of cephalopods.
+
+
+Overview
+========
+
+The general mode of operation of ``teuthology`` is to orchestrate operations
+on remote hosts over SSH, as implemented by `Paramiko
+`__. A typical `job` consists of multiple nested
+`tasks`, each of which performs operations on a remote host over the network.
+
+When testing, it is common to group many `jobs` together to form a `test run`.
+
+If you are new to teuthology and simply want to run existing tests, check out
+:ref:`intro_testers`.
+
+
+Provided Utilities
+==================
+* :ref:`teuthology` - Run individual jobs
+* :ref:`teuthology-kill` - Kill running jobs or entire runs
+* :ref:`teuthology-lock` - Lock, unlock, and update status of machines
+* :ref:`teuthology-ls` - List job results by examining an archive directory
+* :ref:`teuthology-openstack` - Use OpenStack backend (wrapper around ``teuthology-suite``)
+* :ref:`teuthology-nuke` - Attempt to return a machine to a pristine state
+* :ref:`teuthology-queue` - List, or delete, jobs in the queue
+* :ref:`teuthology-report` - Submit test results to a web service (we use `paddles `__)
+* :ref:`teuthology-results` - Examine a finished run and email results
+* :ref:`teuthology-schedule` - Schedule a single job
+* :ref:`teuthology-suite` - Schedule a full run based on a suite (see `suites` in `ceph-qa-suite `__)
+* :ref:`teuthology-updatekeys` - Update SSH host keys for a machine
+* :ref:`teuthology-worker` - Worker daemon to monitor the queue and execute jobs
+
+For a description of the distinct services that these utilities interact with,
+see :ref:`components`.
+
+Installation
+============
+
+See :ref:`installation_and_setup`.
+
+
+Infrastructure
+==============
+
+The examples in this document are based on the lab machine configuration used
+by the Red Hat Ceph development and quality assurance teams
+(see :ref:`lab_setup`). Other instances of a Ceph Lab being used in a
+development or testing environment may differ from these examples.
+
+
+Detailed test configuration
+===========================
+
+See :ref:`detailed_test_config`.
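+
+As a purely illustrative aside (not part of the documented workflow): the
+remote orchestration described in the Overview above ultimately comes down to
+running commands on test nodes over SSH, which teuthology implements on top
+of Paramiko in its ``orchestra`` layer. A minimal hand-rolled sketch of that
+idea, assuming a reachable test node named ``typica003.example.com`` (a
+placeholder in the style of :ref:`lab_setup`) with the passwordless 'ubuntu'
+user, looks like::
+
+    import paramiko
+
+    client = paramiko.SSHClient()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    client.connect('typica003.example.com', username='ubuntu')
+
+    # Run one command on the remote node and print its output; a teuthology
+    # task does this kind of thing many times over the course of a job.
+    _, stdout, _ = client.exec_command('uname -r')
+    print(stdout.read().decode().strip())
+    client.close()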
+
+
+Virtual Machine Support
+=======================
+
+For OpenStack support, see :ref:`openstack-backend`.
+
+For 'vps' support using `downburst `__, see
+:ref:`downburst_vms`.
+
+
+Test Suites
+===========
+
+Each suite name is determined by the name of the directory in ``ceph-qa-suite``
+that contains that suite. The directory contains subdirectories and yaml files,
+which, when assembled, produce valid tests that can be run. The test suite
+application generates combinations of these files and thus ends up running a
+set of tests based on the data in the directory for the suite.
+
+To run a suite, enter::
+
+    teuthology-suite -s <suite> [-c <ceph>] [-k <kernel>] [-e email] [-f flavor] [-t <teuth>] [-m <mtype>]
+
+where:
+
+* ``suite``: the name of the suite (the directory in ceph-qa-suite).
+* ``ceph``: ceph branch to be used.
+* ``kernel``: version of the kernel to be used.
+* ``email``: email address to send the results to.
+* ``flavor``: the ceph packages shaman flavor to run against
+* ``teuth``: version of teuthology to run
+* ``mtype``: machine type of the run
+* ``templates``: template file used for further modifying the suite (optional)
+
+For example, consider::
+
+    teuthology-suite -s rbd -c wip-fix -k distro -e bob.smith@foo.com -f default -t jewel -m mira
+
+The above command runs the rbd suite using the wip-fix branch of ceph, the
+distro kernel, a 'default' ceph shaman package flavor, and the jewel branch of
+teuthology. It will run on mira machines and send an email to
+bob.smith@foo.com when it's completed. For more details on
+``teuthology-suite``, please consult the output of ``teuthology-suite --help``.
+Read more about running integration tests using teuthology at `docs.ceph.com
+`__.
+
+In order for a queued task to be run, a teuthworker thread on
+``teuthology.front.sepia.ceph.com`` needs to remove the task from the queue.
+On ``teuthology.front.sepia.ceph.com``, run ``ps aux | grep teuthology-worker``
+to view currently running tasks. If no processes are reading from the test
+version that you are running, additional teuthworker tasks need to be started.
+To start these tasks:
+
+* Copy your build tree to ``/home/teuthworker`` on ``teuthology.front.sepia.ceph.com``.
+* Give it a unique name (in this example, xxx).
+* Start up some number of worker threads (as many as the machines you are testing with; there are 60 running for the default queue)::
+
+    /home/virtualenv/bin/python
+    /var/lib/teuthworker/xxx/virtualenv/bin/teuthworker
+    /var/lib/teuthworker/archive --tube xxx
+    --log-dir /var/lib/teuthworker/archive/worker_logs
+
+  Note: The threads on teuthology.front.sepia.ceph.com are started via
+  ~/teuthworker/start.sh. You can use that file as a model for your
+  own threads, or add to this file if you want your threads to be
+  more permanent.
+
+Once the suite completes, an email message is sent to the users specified, and
+a large amount of information is left on ``teuthology.front.sepia.ceph.com`` in
+``/var/lib/teuthworker/archive``.
+
+This is symbolically linked to /a for convenience. A new directory is created
+whose name consists of a concatenation of the date and time that the suite was
+started, the name of the suite, the ceph branch tested, the kernel used, and
+the flavor. For every test run there is a directory whose name is the pid of
+that test's process. Each of these directories contains a copy of the
+``teuthology.log`` for that process.
+Other information from the suite is stored in files in the directory, and
+task-specific yaml files and other logs are saved in the subdirectories.
+
+These logs are also publicly available at
+``http://qa-proxy.ceph.com/teuthology/``.
diff --git a/docs/_static/create_nodes.py b/docs/_static/create_nodes.py
new file mode 100755
index 0000000000..3645b613ca
--- /dev/null
+++ b/docs/_static/create_nodes.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# A sample script that can be used while setting up a new teuthology lab.
+# This script will connect to the machines in your lab, and populate a
+# paddles instance with their information.
+#
+# You WILL need to modify it.
+
+import logging
+import sys
+from teuthology.orchestra.remote import Remote
+from teuthology.lock.ops import update_inventory
+
+paddles_url = 'http://paddles.example.com/nodes/'
+
+machine_type = 'typica'
+lab_domain = 'example.com'
+# Don't change the user. It won't work at this time.
+user = 'ubuntu'
+# We are populating 'typica003' -> 'typica191'
+machine_index_range = range(3, 192)
+
+log = logging.getLogger(sys.argv[0])
+logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
+    logging.WARNING)
+
+
+def get_shortname(machine_type, index):
+    """
+    Given a number, return a hostname. Example:
+        get_shortname('magna', 3) = 'magna003'
+
+    Modify to suit your needs.
+    """
+    return machine_type + str(index).rjust(3, '0')
+
+
+def get_info(user, fqdn):
+    remote = Remote('@'.join((user, fqdn)))
+    return remote.inventory_info
+
+
+def main():
+    shortnames = [get_shortname(machine_type, i) for i in machine_index_range]
+    fqdns = ['.'.join((name, lab_domain)) for name in shortnames]
+    for fqdn in fqdns:
+        log.info("Creating %s", fqdn)
+        base_info = dict(
+            name=fqdn,
+            locked=True,
+            locked_by='initial@setup',
+            machine_type=machine_type,
+            description="Initial node creation",
+        )
+        try:
+            info = get_info(user, fqdn)
+            base_info.update(info)
+            base_info['up'] = True
+        except Exception as exc:
+            log.error("{fqdn} is down".format(fqdn=fqdn))
+            base_info['up'] = False
+            base_info['description'] = repr(exc)
+        update_inventory(base_info)
+
+if __name__ == '__main__':
+    main()
diff --git a/docs/_static/nginx_paddles b/docs/_static/nginx_paddles
new file mode 100644
index 0000000000..c1e0896f2c
--- /dev/null
+++ b/docs/_static/nginx_paddles
@@ -0,0 +1,11 @@
+server {
+    server_name paddles.example.com;
+    proxy_send_timeout 600;
+    proxy_connect_timeout 240;
+    location / {
+        proxy_pass http://paddles.example.com:8080/;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+    }
+
+}
diff --git a/docs/_static/nginx_pulpito b/docs/_static/nginx_pulpito
new file mode 100644
index 0000000000..de9147ca88
--- /dev/null
+++ b/docs/_static/nginx_pulpito
@@ -0,0 +1,11 @@
+server {
+    server_name pulpito.example.com;
+    proxy_send_timeout 600;
+    proxy_connect_timeout 240;
+    location / {
+        proxy_pass http://pulpito.example.com:8081/;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+    }
+
+}
diff --git a/docs/_static/nginx_test_logs b/docs/_static/nginx_test_logs
new file mode 100644
index 0000000000..139a0a197b
--- /dev/null
+++ b/docs/_static/nginx_test_logs
@@ -0,0 +1,7 @@
+server {
+    allow all;
+    autoindex on;
+    server_name test_logs.example.com;
+    root /home/teuthworker/archive;
+    default_type text/plain;
+}
diff --git a/docs/_static/worker_start.sh b/docs/_static/worker_start.sh
new file mode 100644
index 0000000000..e2b4424aed
--- /dev/null
+++ b/docs/_static/worker_start.sh
@@ -0,0 +1,40
@@ +#!/bin/bash + +# A simple script used by Red Hat to start teuthology-worker processes. + +ARCHIVE=${ARCHIVE:-"$HOME/archive"} +WORKER_LOGS=$ARCHIVE/worker_logs + +function start_workers_for_tube { + echo "Starting $2 workers for $1" + for i in `seq 1 $2` + do + teuthology-worker -v --archive-dir $ARCHIVE --tube $1 --log-dir $WORKER_LOGS & + done +} + +function start_all { + start_workers_for_tube plana 50 + start_workers_for_tube mira 50 + start_workers_for_tube vps 80 + start_workers_for_tube burnupi 10 + start_workers_for_tube tala 5 + start_workers_for_tube saya 10 + start_workers_for_tube multi 100 +} + +function main { + printf '%s\n' "$*" + if [[ -z "$*" ]] + then + start_all + elif [ ! -z "$2" ] && [ "$2" -gt "0" ] + then + start_workers_for_tube $1 $2 + else + echo "usage: $0 [tube_name number_of_workers]" >&2 + exit 1 + fi +} + +main "$@" diff --git a/docs/_themes/ceph/static/font/ApexSans-Book.eot b/docs/_themes/ceph/static/font/ApexSans-Book.eot new file mode 100644 index 0000000000000000000000000000000000000000..332c8cbe31ad7a62f019a7c29cb82afb33c5b328 GIT binary patch literal 199888 zcmeFa34CN#l|O#&d$p&klG;+0sw}l6Rh^`=R+36O>7+VIC%u#2==1`ern^}L+HQ7~ zrBxJYo5cY~#svXU7zZKjIHIB=prWEb#$iT8bZ}5d!O<3(9=h`Xp8MXbs-)9RH{$32 z|9?KehL`v1y}O=!&bep1_de%T>4`Fx1S!P-6k#EgSqPyDIw<(f%8C1k_AQT6j(_pL z{X|is=fD5rxtmtX7qk9U(_-2~yXX=mT1VSyH=U2CVR|qAI~yt8v=(2_pbP07d_50u zPR4(`@N_<%g*3a7x`$e+UH%uPPI=Gb?RiLb5z?HG`%e5mN!GKQ*3+f(NlS4KzHXOk zh|9vp93$2K2mF5Sq;+kb z3#+TXiRVj@Yih^2+xH~znL40a;T znlsM1^z`2>`@l^^u?tA~@gL6IwSDJ*e`m{&koGNHJ!j&D{ao|wD8CE8>(4y*f=hO# zuUe1aFA&*s=e&2vcIDfvo<;fX_&s{=_Dl9Czi}MG_erFW?%saxu4^B@?jR`#QJ-n= zp7)-A!OeR=lOW|y zfBG-Je{b*Er_FpP|CX6dx8tWBD8tY~btwJzJlbqtJoBBI+s*Qg{G$I+DmV@P?k1~z zrBD;~VPJ_acC3+aR3$DB60$0fE7#)90qrh+1_U@{GZ*o4AJKg|iY}MA@Mk^Xfq&8e zRg~9e(0bGD!W0FPe*bwiM|Kz7AO12XM-`u$F{Y?8)sZN}Bfd2wZNEyKm{`CS`7b3U2(T-1Yd#JQ;R zAT2OeApNb>qI?)-?4uRRmtvwAx2kK=j@-_OzS zAH{tyEkPeT@ofW_iT<@HH`4uL8N=MmA21bT&>aZp62P zGyqH-P&U&xWr)g=c0hd}+Jn9_oOSp&;$8*Z=|P#7D|e%v=TWVA9p7(8yEoGfVmDxc z@ldKotrEn$bAVf%e>43;d=Bk?9b<)Yl4HctTzqtZ*bXMTxsn!^2OxLz%c506pl#eL=9 z7^BiljuG-R?9unjasLJG4=bBRC7!2G|aT4(a&ZcGr*JYEA+ee`&7JNO@6$$NZjG{O!M>I z8J~De=ib2+m@XCVKA_*b8TJhKBktfGl78pj88;bUj^mCoZ;^D5F_dG+V~p{Y{D;T+ zAQ8_XViou(|_ayGs`0YY@lYk}4R27t)1Ya4$y;`q_>pIrOZJ2xM zZNm64Zp?piTT3r)pV9XC%zumc%hE~ zMs@gIE6ao|z%&VXXyXYy6z9#%h)1c#)Qh~2(jFkHs$bPC%K(#EM&zezGAWGB=RiU> zL*%@I?x)A-EA&HpiC)M2_m(h=kf;)Ikr3^oLo5|*#5%D}Y!~N=E5zR^Rm#)K&(tTP zWzj%19IcJUqseG{bYXNfdQU7KOU9MBC0-u)#>4U2cyoMdd@8=H;k$2q>wlG*8Ppo3 z+vy?tXZkulM?Xhx|4oO51+|4yTRm#)6wAe0v0hAx)5Q6xEu=i9ynx!2s5cslR!5_< zw!S&Fc}i;ADQgomGc&&+n)%_(4`#lH`_nUT&OC+tZ|T*U-_XvPU(eh$^Xklh;>|0# zeo2)7;mkQ`A02l7Vhi8--^+_$&f@R2FL(ZY;^&XP^z2LDe(4FKmmYoTp_lG{>A*`D zzI5JC-HtDqUyyKpkIX`Jm45%EelOnmjd2m(L-)#042`c5q;mYz{>n%B=aJ%L$%hgc=mLz!FT{-4AWjw=F(Y0mHi^w*i`Ysp(=X|C`Zhg9 zPeUU84(+AyiZh`2>=D<{d!Qms0h7+9)4(BjL0&!+s?2lXd=~Ltv^a{~tg5MG0R7-UzAqI&!LG{!?NlMWIYJ@D_ z47_cjR%(Mx-T}Uxp)Tr%8kVJA$mt8IA5!ol8l)lM_+lt?BQ#1&AnA_NQd&mKX$7sM zRkRu^*#xbjwNU)l(*~$@8)=d@(Pr90Tj>-!mA0V|H_(mrcXTr#dK=((H)e%@q<^9> z%JG|~`#?Pp0HYtIhv^Y|l)emF`e%9^^z;O9{2Lh2ofr`wulE5uAEqy0j4p#}_#uqj zztG1pGC#(6{e=D%qj?qm095fqdY1kVM)Vr`8GQ+uu^&q0mGmPhiO&-89hC^ zfB*LAv}0m&3U8wP&B0GS{M0iQncB2zQ)HT&H*M1EM&(L++L{=iw%{VZMy4$zYbK|y zBh3#0*G8s>rz>|=Bb}PYx9E~-`^b)I$H-};Ez>6LO%w$t?zfm4rlL#sC$@7}WX%-e z)=ftv=z&q+wAzr^K8n2N^t70YMyFjP%NYuINDOb9cJr^b_~pj0mT3!DFs(EU>n*w8 zh3EU-_%kgk6Hyd~@{z*SFuZ?%6m?IVn_`J+vE6t^UuAxjA-`=xedDNQd}?~%X+}?? 
z_X`S)M8}Z7cUsXO9#(`Rgh2Q4f6KHrP1Cl-@FV5rHfX)Jk>TNy=+qQyy5FKSw@lm8 z(W&Y3sptqQpW+(v|C-4}bO1jS)81Xxkys483p zvtLCxOBpH_mD7%DJ&t%XGyN-=@XL4t)2d?^md@A=)}^8<~oZNhCoB z@nhNk{mT-kO^d|v0~JC9{HVc;5er25qfCf#!+T%rv>V@i$N1(?-%rBtn=VJXKziDP zTNueJF|=Hmep@yzns@DQO+=#u`vI}mF@Sjt=rXXW^?tkXV<2i~XVMzI3`7IW7R}M= zDMq#2n#l*%s3{tGP)(Y`n}&yhN?eI^MBeu&mLw)5!Ln zKst3~dj!u@n}CBzwjE6Z@g~N$_eK&Zha09B$+u-FMk@|AfmrZkibu`NSjqX37i0?x zLJcbZf?Oks*rsBsoA#s6+>3epjq+Nj?dX1FR`-*7_tA5pVzf2Ut3xT2zTXL&nFZey z0@c`wL5GKlQG6iL8;MC6w?+rrVw?2l1k=+_<~+VZ{YBNRZfLkxI`qb9SInDN1n4}Fl*FQW~K|7dNsSV5Io%3i}m+`055(e*z6*yMvZ&d!guv+jpu51>(8~?_Y z9evE2xaI;+*T@1BG(;}&T5a|)(5(6Glg?mL&|U1o-<=n-kf;@&-s}**y6AMgFl3DiMx^R zargn!M3y_@lemVc>>(oO0$jHcxoo(Q%Z;mi3fFD$L*QM-D3Pav$U8~o>mu?)->yWF zK|F_cYW|Zj(noHDy4{ZJNnF1ps#%H)X=|^6YJ_)j)SK7}S4objp#s;7aJwK+3iT}b z15qQ&XbKTEBTX~DwXB4cbQMwC1W`Nkc9>w4LVGjM5q13++dZJNWOP(MaN14lVM9We4a^zcyd@G+P zTKy!^#8RR)exkL=yY6QAGxiW|*hjQ6N;G*V(Wa}2Hlw^P&k}7#o>TDs)CIWCA=-xW zw@nk&5u)u#d)g&LJDw!k`2^AFIifS7MDO`DF1$PEMxt|_xUM1Ejpz5?LbM0*p`emYPAA=LdhHD>`t0=B1ICU-|x*p$AJ$>vT(am=f{rxipifbzqPvmrAAd*mMZCXfn&@8SnMOVLKTh=ER-%WH z?h&-(Fv`o?8M-$MGQO+?SU32)YuM9-p(A0W*S@%-aa_%!kRU+y9L z$pu6&IEnrZ?_WfnKf4=NisxXtc%JB&2Z&y|ljv7S|DP!D*J%H5Uncr}JJIX65dHTh zxPDFaN4)>jK_d3O{n>;IdEP)9|I$VD=8d?}hC|hG^<4uS3BDh`3bJw+uIEW8pC+Ng zXQp0;>(_ANyh6hI7753lBwV+UsF)z(36bzUN21bCB4i^G-bbSP5?rvJ)FOW^%84RP z%tRu-heYBaiH2ShDWqM1a+;nY(ds18b`6R4TS#=gNFsxJdg@7Jmy+m384Hna;MXJu zQP1MbNQ~r2jG0J`<64Sz%TXTiSgb;tlg=bD@g|A2sAt0kBsSvxmU~E?nkI2tl*A6S z;dJCXV~WICqa@CrByld0crUI!10?pM4d>->y+GoEtt2k0AaOC$UvhxNrD)Ilk?!(# z5?A8B4|RR8i^Nq(^Wg=!UMBI8+eln}6^Uz5&$SPc_$cyz^cfNdZYFWv1c~eYByPBy z#K)c^anp??K92OC*h%6ao+R<9Q4*hCPvSF6N!;2+;zWQqtUw@XwH!dLooD<(hnNQ>T z4$?n+6^S3TllURh{0P_c$n%q(aOvJd;@>|_;zgA6bJYLxK@z`wfy66l=YOsz@#-Xr zUjr_`*+b&DDC4*<)cIdFT-T8J9m@LsLnK~H<2nZy>iol7B>wvu5`RRwe_9EbDbl@x z`u~FGH!macKT#5gE+O&1cawMvZ8%K0o+B~iBn1{6MSPkR! z>#ioH=SEVp&y%t+M9RPKn%MD6TQIWht&@sBZ=Gt}>Bw(qp8o zJwVF(dQ!lnm5n<|nS7CyP1lgJxr>x7can0-0#Z)J`%_;bW$H{)w)c{+$oF})@e6pq`v56l+(XJgT|mm0&LQPqlrbH}^(HCzqnroQ zq&$dx4S&f`tziGVrz&`=26Gp2m0Jyz*VN`+Jk5 zJbMo*-#3x+gHc?skn*FQ9QOcA<^;^^EM+Kow!xZNPM^s+%H1w!nZs%^S({f(ubi07 zrR+AdNmT@rz^s5vfk*)Cv$Cw9^0TVi)Sx-CVtNOX*UwJQNsy?zn9nYPrbw=Jb}|lX zKou~TqBN8nE|kr=L*`AJrrVl*9wG35OmvG_D&~oO0;ZyE`I-Fp6leaBupB2#Jm(Orklq?nD~ModCgOM55; zdMJdOTdnmlgtiI1qUw^gb~UI*a&b_jP(hZmCrBb>PqZ>POUNX9lI`lrbXErZmP9<6 zRWoWf8VpAHN+fnCTC~UL<4X16SGe8qkBcZTFg0 zlZr|(CJalf2r7D$=X}8s@RvVfxT=v-W(ji^{2!5p@KZ>QtawZuM4|**ffwkDVn&RT z-aVPxgv^{4z zp>1Sy*qX^C$rfM(7D^>uW2DrU7OpeSa)2VheJAWyTRngt*YAcAiSz|~{|;}pUs3LJBujG(FX zQF7Z&$e0^AvNDruYDNunGXljywp@3FhZHQxsu7nT;W)J_^B@LRGMVbGRWqG~N_SVQ zl4@1EnMHL50lmt~V4zh^#NEmV!mW#wy(jnAe&}+4YDH`Pu)D0aZ?L9mtTPfy52gAh z`)faVfu+KRZ#7v@d27$YNaJvOSo}vY?6Rn4cWr0W>ec68YIa%|n(cP8Jy;*9s`c7b zvn$e?TstN%xYXpZ;gii|4b%hdK*z;1Z>U?9H-X4ObRb0;F`8>?Tu@UT3b~wu+FBN5 z8Z(Jlb*d&6iiE%dd>&`e6||ZWF;Z5}R9^-1>UIcn3pf#M7y_5Dl!1qV_%Sd-m;f2O zki(*4uqa}6DrU3I90YO632@O-GwWjsVv3O6?y7VOWUK_?fvH5uYIR$KHk||a3}}*>u+0+0)&XOvEh# zf2CRe&B!-GWP~tgCZK>ho|K5dbJg$qIy?JtiM;-B_+iDn>hNpg+gm0kw&2SDz5XEX zU9^1pBEEJT5BVo}ZWfoFwtf3)d^wB<;3Vc{@dg<)t4MO6X;N(*-AbL-k;7How_%Zk~v286E2Neaw? 
z%s|I5$}^ims{CLS%=ZwmNt==97HydYfMXX;5F9KTE8;Go%0kgRfzK^^Hf)bw^wC;< zM4|6OdnLn%RM^nj!`#aPRIXKgJ;3W$g_#xq3>JS{yL-UUYVq1)t;_`C$dHJ)g4+!$ zS>X7fVtf=Q1v^4(_C}-ab-k-Tpqeb+Saqx^WUC!MV_CY_7i*0KVv#C$va7afyt^jb z(lxYeL*vO!181yg3e>lS1D?jw&X$F7m&Md&v37s@WY18xb#aql3EP7eWtARBus-eY z{P5M6HdJpJX}+tZUS2!glkmk_s+?hm zoQF78@-`{=Y|il;DFLd61X#}AQ@jPMFBShvUrX692N_nA`edOjFKS|(S`fUp=mHne zaPds1u&eEeZD^8p1}ISGK%GY~1;Y~LDFm?1xG~3IB);ZLH&j(Mq10(^GVS|8 z_0$LN*tYGC4^CAVpLniqndziIpKyJHs1Gw&hUYZ5Vr?<1L48`qN2$*YVdzG2i7sEW zZs4Okat@o#R%R=6*bA^tnH9{XB4CJ@P?5k|+`>B>=7yGVJjpdxsQqXKA7zYmWhj*m zX0kzRDwQc$QrQfi{{FhqzT5{Y4-EQO`@^G)wy)kXG^%ukmJKXg9%{Mnx;6V+&pfM{ z#TmeOfUXnIV*V?mf6YNT!X14z#APev6N{zwko#Fi3`4B|V*;OG(GU_2Pz+p@@0#S# zT0+`L45T!XDy2w`e;t_#+M=c|9A6D5R<<_9WxEsIneI#=6G#O9ZoRWjRv=n_5RB7md*VOF3O6d&Z`@PAmNKM*Q@K-EII0__z&hYpFZVZ6e7 zsjs4qWz_#LE2+q@NhA=}IlHcxDiM@g(p>q0e}`0-hiB=UhMsBYn6e%MecuUsQ>kVa zPsx>WE1DU<;|fX&uyC+tS^o1sQ1fpAD_EW!!`$t~-0g#Gc5=>99}kBr+!hE%dQ+t$ z&n;3_%*HSt@Ux=06*dCIP$g`ws04x-sEgrkB;l0MmKiQ344nnkEXdnn-ORMbvzEHW z3;P$>S}e7T`xh>*vy7FsZe266wbkit-8!*mYipUfc+U z`dz|dGaB;~^u-|}Ifq6K0-82@(h2mg#vF~NN)w2+60Jky72Wv3M9-o`Ng5IW+}t!w zQ^{gES{lvZQB2I`h9rS7RW;8_4v6Dx43G!pBz9_zka?sw$W!?7R&hdw8}n^8utd1T=55W)-@V>;2i5a5WVK{*Gj<>iA&?=cyOSRmFddV0i< zTn)or;ntScs=6(UPdYVIQ5KAc^3RC_`LNjIk0|Xmo%I2ey}WGH9Z2<$*M*xKn?w0; zE^ZYctL)>xK_5|S(6?T?J?9ma>8Puz^gA7PEA@(A$u}7f`XfR$g(0IZ06O=e$=wi9 z%>k)@F`7f>Q(5nT6zQsjEGC6K$b67WA?=SGB{!HV1V!0>v_zJJyqe5aEJO;576wIw z)aH^2@D*LJW4UM+V6@2+R;W~WWu-6=O|*I@Pr2rrrhx`eMbq*=15ztbUsUC(Z45;- z314LKIcpa0sLO2a2{-rEoiscgO0`$jbS4e3)n%7AMKaBCyEWc15Nla8kO;U(D?Kdd zWB8f@I|Z`*CZ2U%0Gu}G;GH6&$CQ=;gv?OUV3HOVg-mR40=t2A!VKM6B8Ne64M$I^ zAqJZ!IWbdSYcVl2OBIt>cy9SxCbPsQL)PxmvY{CPBO628gqCrmFcL6{Y0WFiULeL3 ztu^-!>oe$6c+?ord5LbId`4XMy2zY;_GtbqVtu#h70dFE4jwwx#QrW?23*GaElSXL z8JBCT{T^vI;}u1T0xo9|WoC*2hoe%>LZ`tL0TRYxbXzk87tv3{Va-z7ezdGIIU{Oy zu@G2LVc6)OlLh?F#Kg_IkzOw{9|h;-bZ4M?!ULiY7(zc!84}4~uUk5>AkjNse?WXR z|K(tpzh%={bI;oC&Dr(g>WMCAYP`R15iGm!9sIMRx>`0YT{D#__qhhkm{V!tsnX=;}l3=e3eu>rT# z^B2y^WkzZh%kUW3=)NofSr`;Nv1)nw7Lo*?g`izZ>ONM324U4-?MJnI$!n1S8Ip5V+}on z;i{pYhIsdoc&L5D#bcgCwYP12Lo&Hxye)IehbFfE!?}xsWy5an8=A>;K@vD-fyEuZ zUECoyE21zPgaHelQ#mbg395S~s7bN0e4rtbo`PviNf*=Ei6w}HmRQVReTt&%(O2nj4XKo`tBt{Qu{!K|bWY`2~P41~pKi3cTFRTJ}d zHr4QuA{*beF%Vs_2in^Q;?9bO{-((-6^%=} zyO%UpY?&O}J`gUiZHsnv275N8Al z%1CcpO-);GWaW7N#`;AQ>FR}@DSL23N8^fx@%X|OjU5|;_7s-prY9Cj+udwh`Pi{& zr3wJ)0UR+)vP1wxS76EkNDUO1vt)s8v{|x$PA)KG!GysaSQKfIS92Q-+luAIw#_ zoDKDm9W$;BL$KgSXsG}tlnbbP0hTco6E(txV3;xGoB<5bbw5apT}P&eY$FVlBBP5o zTWGoXw?12xV1O?UTUBwUu)=7QNG5xFvYJ(8pMe-7J*$JxWK-IE(p0xM+&%83#yL*Hj4zD9<*Y&o|D3j+Ut5K$D zo-!>K2Ty7`ldq_RfuD;7(?+S99|2P{Cce@doRQqAM=Kn5ShEEyf#8-fE9{xT>_G5b z@RJpC4;Rne47Ptv2XNd$DdnLw%IA)R?9yV_fT8jvVbIs?o|(>uu0a!6m`Tq2i4tQc z{LXOdc(hr?gbYYhi=;i-ptOjZl;!S-t8P(yOLvXUR@2?mop-yF!)M*JbLZcko=dob zi_Tu3Do?F!9GGbJ`C2Ci7OY8>rxu-rQH^w+eEG(WS5EfSIvlkysTNYU$J)Q zO=k@!-Ginwt61qNkN1u@G>rGgpBmY>ciWO=b!Pb~=Pa7ox2e6cD%?1>t*2+(SYtTc zlwY1)de&(7DXSMX2jc_lcI;WWaL#Yi&j<*i<Pf3idhrT6rO+M z{m(DS|8mJ{VTZj}REUcYU;5)8D{mgQfkDDHqAY_P=fvEUrZ48)>85Zf7$|qyVFxYn zuBI3$jV&a`3=F8SL<72j=2n=%b>|wp%2P+=Kr{+Z(g@NsvO?5@X3(LhoM)a#Sc079 z3pMDoe8C=&@UU1cG}#ymu`Uu@th)Z25WgskE^Pm&2b;1{Xa0%%yNByt%eOu_{`=ug zJ)!OnxB@D|{ZnHHl?7F84Z-}OV0~M@v#PNxvT}*oU)Qs=v12k|ukTz~n_kr?r(#+G znX(&r*+3WM9PwyXaFz|E(FTO6u>*N(c~a!48o-RL_TXIwUXQ6A{cfL|_HAZ*X$A2~ zhepA1YeLw-|v?v68gh2c%$!uAT{g zU8pb3pez#tZlv{~P%>tFT?o-i<*a%Llt9>1K)=Lq^N)&i@~gyy`J2Tuu`~bDu=3h) z{!pPWD*9rlkK~*M!+vpGSf6HjnQJc5SzeJ|C~IdPaZ zRE7utC~3LW$UQOfa{e3R;ve(3iXk-m)5SJ3%}j!38UcUUBa#jEiMndP7rIxYXe?;f 
zSr-t>4NV~q8(RvT#>8eBld6TSNGmcU9SBJx41@rLz7yNk2It9%k%6ql_*EvGt$Xdb#%3#%m zvtjz=Oa({JaaO^ep5rgvn8iR59`Jbl&3F_~{>Jffv1eREuKZ(SDTn_7o_p~3QNUBB z$yt65&35m?SGMa**cpLyC+azcS=_T{oI@Q@Unk^!j@v7zD;{-~*{m>R8EjEfRylwO zA)jp?s2kuact&)~2g(q7BIdV9!5mU!jgt0^65vQc0+v^pKWZiMP3Icf%1RFJK-*-* zVXD(x6YLlmKl#*(RQV#aTHm4UJbdfY)y#(&PKAA}v9B^H}tnGK|Zb_5t! zY_7qQ6DnX^RpB##U`*g-n8FzyS$;|s65xYOfzU&Kq6l*Ey@+W|GA(Kcl(={3o^*f! z=Kw*ON0v~6XUA21?gR=paLgw$j9p*}@!2IyMEBPGi(>ec{Ocl`e_44gzh7LGpU$5q z&Ng5u{s%BL)6}Dywo({-A4W=!ICu|psuEJIka_XGnx{YnoJDeYg4G)iTKI-Gdeyt9fMGEDJS|t+%V$LKv5GHq^ z;v7y=FsPqMW$&OGW4s#@st-KH=gW8!KJmle!=}+K`TUkq)A>sL@Yx{opDQs$8-E<& z#$Yhbuq=cytsJ{&?Sx5}2fhGt_;~;mgKF?)7TJXKukt+rHJ57Q#oQ9(3pB$S7&VGS z(}<+ANrr_b&4@lf%^XkRx#KU^fFSlF0|12zp4^r{R~gNSo0L&y>M-J0a7}?nDpF2< zKj!@Z7yZXdhbz5ibaYMHW!#s0`*)wRb$7qd*S~w~DZBf<;^M^@PG&Nb7cMsL%lDtN zc=0*=moNJ$-#>~0(Bxr`Np^yfL+XlDNq0)AG%PKe?9r0!3-rj=1Mq@^8p`r8@ZKaX z-RxnSl~Ph1q%QfLg_2`91oSA8qF~JI%aDu{x8K1kj;nsS`;PHnjZO83S-rVc>}CTm z8+utdZWvmVPOlkikbPS*bCvQha-5oJ!=v#iJ39&-E!>RwK$GA(58@(o6gic-G!y-- zkHC6-ofDBd%W-SyDyliqpv4dw&9*A7uI7o6k%?xPalb^=Hh#VM0))06mY|+Zq)0T{< z=H3DzHCq$kiq?dT-Wn6sWW$c|HhBvcg%hkX_fG0EsNXja>RGJ5gldT=61P9lIS_L= zVgsEIjQ`izsePeEs~+6^Ih)6Jr`R7vByw$cD#$okn_b<$;!J=39w<0~hoB|U5z@fJ zK6+mcaiU$F(b|BI`b1xW&o;r|u4coKhTtH8ZVeJ*ah@qnD%W6vJnLdX2OAaUB!)JD ziIu?#DwIOoe(@ug)p4qHC7F^I>n`{juwmj0dv^zO5++;0t+lDRW zPJ30lKc4KU^;m2*t;4OcWrK~Hcb#FwzBZ0gx6+^F9AqWk%w2#@0JDWQfumPa1}Tx; zAPNXdm!^=CriQDv@}q=_l)`v2k8oN!t3zSb;!^7|)u0A7rYGkqU6CL}^0_L(+m)F+ zuF?#A6JT)gc?ZPO@gIs`Dfa+cKtF?o&KAR?r7h@5Z(C;W zcuF&XB11q}?svn~3&$2e!pwPU)yP28cqHB1-&YymvaWBuX<#JlNUjaJA(mIc&+Wq$9tfA`PBuZ!r zipuE`-%4=KTP#y5I213eNUFpqO)C)162~ARQ(U4U2SCl2Gk26c5>(Ps2*DoWp0YRp zm(KiORzLqd#1tFMpO-3Eqi_ud>UN$hge}LCx|3x53QKtfOLM5q!gGTN;4QS2f3z%N48)0i|Mjh=>=& z=%baCJB~7nFoSrkCRF?n$cf+IedVezjNNk9EO2jgQCFBXIOj* z7-DsXp_4=|h!JeG`Dx70oZdPpEOMu^|^<>#if8e$BVJg!A_q#icZx4;~csHrUHbQ!;GI&o(An1c4osM3oWOR7^4WgJ6mK6 z*|dO?bV(wNJ=nNZL}!H?>UnkJN~T*2Er|uP;*R`z;`RKg;{I0Uis9D7AH?*7R!tzD z{a+DZJCEnNXw5*=kXTdjd8*{zgk-oeg1HcEnfOHR>Z@}HhSj{B(~dW0*UScsE&0zc z6I=ACT`j);q50c`N||6F{Q#zb4;w)tvSUW%n{1Dd&78Q-<@UgM`<61yeZ(47SV1r^ ze8rd_m@gJ$7yQib-yMl81xi;a31LOdU^!RBZY-aN&aIf5XU ze?sIAVH6oRu2Y5t@3*UBtCAzvbGRfivPxy;vNf|{@g-0e6_#;1dX%1tt#+`{9{Eq&$e?}6n@#zt8qxzS@~)|L{mj~o_%K$`tP z#qc*Sg$vWyG-kE6ChW}a)|LSwszHwc^prZmv)SK8QPo)OZ8Fdsjskgq`J%0k+>e!v1m=ioAf0;j6|oXi*1P!viQ| zTQxw#eq%sdV`6XwV%nDSLsv;Y!~@Tl&tyV8f}~^Cd5{sks=z={ zK6|KO#OEkBZJ}xZvGTxf$GUvcb7IuWMZsaBEhe&9J#2qQbd`*_${I!p#A8~)**rZx z&&K^VgB^u{gKi_Vpm8N44aTCN;8`G+bhjL?C>LYzQVBWB1~1Ag>Z@LB|MFYr9Z zKG)jnV8B&Y5_c|ru2t-F4KwdA@wo!4=kvLirbY1>DybFD7hNR{7+J&TS{jGZW4K($ zyzOg;8cvGUjCQy6a%4qsTlZ*9?4*XFwe8+R9Q07;9&osfF1mnaW#}g36!+^_z$veW zQ+{707nGi6>w@!UdAa2E4Gq=fq;d~_Ig6;?mt!@J_kjLVuoMTsFjN{a7vU!y2!wn zRA&&FR7&FFtQcZfv=Elo!Kf>;bICaL@M7SKc*J1w9I)GpjXnJNihUH>VrbyOFwPl(z&R4+xV2y#Atj4;{CpQOyWxFI^tu2Ugu3$+C-YCX8!)%2(Sg>Z6pxEAm*eU7tXQX4} zSI^vRpvg2?3C}nZ4l<`)4}=0x5F=L63bv>uyTo-)!RD~J#Xve{F$SEK+o6?Gu!p_V zM&$mw>RH@?G$5E!SOUC4P1-K9+N!dk_)vK?*pa*S){&h%rH@W|z+_5w?9S###8>NT z&eA+}D#fr0%M9G~QIfXMo4L>exWm`3IcZgIPio78Ep;^|QOi8%46Fk!tp}!!HA1$_ z4fz2pQvhqFv2YrKsDwIGnI1O>tc5DZ^R&0fD{?5qa_ll>oSZ?mh`*av4(9vi^A>?s z$pYc-jX(kB8)(d|hAAcubwgN*4VZQUK35$yAD$`Ud>LU?q&yav6YJ zm)b2C@yXCr%~Y2OV5b94Aw)V8vDqS-w0^gSnqod*YGJBjNq1eM_1t8&tt|P)@!_VW zr*@rnv8OuecY7-8bpiXI&QtIDJ zmzG*^)9G1I57Rhu^1ZlnEu&>JaZ^}i!}bLwK0ZWjNIEwdgv?toPMlbUxpz`kLFSkH zAXwQw0#SswOpx&yVyj#NX^cc2h!0E*K2QLQzq6a)$S1c; z0$ggTz1(h%9p}L2sItLKb zd}dgS$DMf@drW>=_N$6MnsZczd@{znpvz%q(fo^rHZqM%2JbCy1Qpd&VNQUgt8GV9 z+A9bG*v>*0RGLKVxzvLhaHv9?_U8nRs|ZZyUST3EbnEQO)`s9{eeYPj(6zU2SIZ*{ 
zYLpd+?@0|bRsyq8BGVAd=w%|FbCfxF1zJgbL&VPRXuw-1ARaZefnywuxTt6`2?z)O zhdF^kHrmn^87f*#2ZoF9GQ5V<0~gSg2(t0LNHa|`2OtLFO@0H%LyMU>I8KZ6$O=o0 zQjN!7N$v!SH^{^1OQfu9f$(HAhyS?o+e4$HzdThW^Z%Y7_$KHf`7+vs7EVc8U|Y@9 zqr9~j8gJO7d>BG$lYxlClz?C?wuZ7n4`KtuO+$ES{W9JGYy{>?DLFB}*K+Yqk9PM(s z0#lh5X$mYbnH-f?c3|j1aBP!YP?{FR=8$WDi_@f$o{5KI8DxhAC_phov@roX z*k%Ux@I!I%N0C$ER#};4A_5%Sz(Dy{1w!yJQ#0C_0T?cc7d*hbgARia0AHEI^G(|! zgiYY@pc%P$r2z)w2pT>B5#YoQS^&t4PI3Myz z*I^QZbS>vYiBd9j1)ITOo+}w-o(qj+l9d^n?Cfci$Csz3LQ-I*PSx7ii1sE+C-@7r@xhQ=-ciA0BKOO zacC1DVn}1m0d?T9w!kx*k%R>bZRJwKoW)v4>;Z76wYs;Xvo~V3MtVCtdaJEtp0>3) z>^tN2w(`BrbEs(<`<0h98TY=aCCTKHsXqPAe2nck7Qtu9zfBfkqN>gk`_0^ZGJUDEtYInZ>qcJ4%>74XlOa;f z5Oir7E;&rIJX6f!2r$E7?#xyeEii150TWGW!;Ep(;-#N0D$E^&HU3Ky%c8Rs=D~u# z3|Z)*vUUDI{#E6*BlKm9_!ZNm20y>A9tN=5YPMDIDxRXPB8nM;{m01zs;@lO2Fx1>Kr+xMk!vBtK)` zN?^qgHeEsQl>nA~vAl+7ou#4Y;AlJz@wAoVWnqQnK+w5t%}AcZD(yKbi;QU&SHo*L zU+Ky#bXT2IFslfP9ijTnFf)q8`X*0}TRdiCnHh{C9A@W%pb^-y?n2+PY@h4uY-z3v zR=5jNWn&wJ!FJ5XD%Rd=)+_EC+AR?R`E}s|f@AyKw5-0ul?U8M3w0i%=5tqRC%4MIauFbSCdVLO;)>kz*b%!j(R;cmF4)5NQWccO<`%|a|NRe zj|QAz`5xtF3W<|)&LFgha<|>4ib7OEn*)q6#6kjaUJYPhR#owF2m+wVaFN=C)=L(u z3YL}mva#j``v{-|_8{dQ8Kp0Q%F0=ZWyOJUm|H6b1mU9`q$vS?)KZF;m3P>P9Cq9L zE2`&-4Ag42BrJ%@ftxCe)u3!&krB{YakAf6*WX$rcJ3Ve)Th+;7KhIPIm{K!G>Fl< znydY{W%FNajRJvqe4yWnZ(_f#qskU+wbS;gU~!_r_koI~%gPc*B*W*~7J5T=l-@_J zL{PT0uA^jKYv$lVjm`0`C^lwLzDdu-xPk?vMrL#Z;|_G0leeg67u#m!IYx!KcT$xh zTV$bu!a@l}0%lrl#sKy0W6-jTqQys8$rhi>J6W>8;cIdX5*UM@VhrZ6H>QH(8_Cnx(A z=gVXCJ)&@w#GV^&SoN*WZ(;j^)5RzA3OJ!4^iO#d{d3Z*#h4Nf+tT;-O9yq<~sUv{tiB z40Z|3VL<}|DQs74A!Rx;xGz(cMuV(v9bW~x6DgYE1y+{v!JCU+SXm8EeEGh0S6#L4 zzAvx4{(2$)^yHI&%Fq1x$2y%@B%N64XVU+U*uPo+cWhOGu9OmrvluoLFZsy0fv|lB z5}pcy2u9QlA}&A?CDg%iC`9=i5`wXU^>_--9bb_;I6z4PVKV0*I53fadBxtZX~YES z$tqsu7GpiIco4W^r#%u^j*P>fH)aH^L&iH4V@7f(774;5)olcVw1|fMZ?V(JPeogc zIMCmczo4Jn7DF7!w-9#`qm}GQiq`Q8T1I7xi5Mi~TxW$AXv}2H!zCR^SYQP)6xJVz zz0xzrA*!&-^DSgSy}r(#l$z|xV48~ueU;eW5;hEr{F%)9dQyq*=Ad7A)uHb4^0eX= z9=F?rT?^CY<=sQ7S4owNwLZUCzA!VK%0CiJx!tK?{*lyhW}#SKuMVT#r=#6pLc3Lc z|3`R=A*G?cN8Lpdtzv)N>Ep9vPJW0oqTF|2Sd{7_RiR4WIZI&=X0Fr2iVRzdv67D^ zIhahqf^nhmPzwPL8Ar-}#}A|LfrN$oEcdu#D8QhPKFgmH3O_7QDaEe)-EKgl(Dk%f zJFM2{AK|Wx<-w-1vZkO|&RqupP%-+R5N+ZlaSv8K59J&IUxgbx8gNugt|jOMM<_$n zIQR&xK~C34X{s3jk=~xdHbktANvZiHUn1bEsZCago1G<(V!hkt5BWpRF8wa?XNqpY zN|Q2upFZ$Ga6z=CGy;S{-;?cutZK1XE!LO|_7K}vH?27!fwjE+0qe?KGD-K!NC9>xz}D}KcDiZR1LZ^+jKYI=9Qbk24XW)qE(K0 zkYLetBKFo9bOya1U;##7#_uq+;kLr4G|SEVMI50*T0AzyOh_?uV&|RvIsO=;PC=HL zn=5ye^x9NZidTse9DB3`$L=k8F2T1F%u{JM_VnIEysu@D67*8e9gkLrjoof!k65bl z0_A`#KsAi7jMq29u50)UK7b?4O2hAphZaC`kuZUtLjVg5wm`Xpny8hGA~+AkZ*4cy zw<$SyB4z61-%zs4%F&)x#qiAJA35@=MR(KaM?@G2qM%_DD78gkF&Y?<74R%a^c`@- zX46i#&~~sbKorymh*;UZ0WA{}lBx!P6^tE-gk;P>k*s7RHqUT%9CsdA?_tf$&Qk?T zPtuM+HB978p4ijN;2T`G;kC*JQVDB!FpD!E0>MP~rR%O+c-?gl>}HF-h?R}F<9`QU z%9uPv#L?-p@7Tc?`?z}Gg{#7*lKxyc9QI<43D<;iD13zBoc^#v`BZM<;#_xE$a; z#Vh*zx`w)kwDSn+Q}wB2LtRZMQI#k?k04;!@vzIG$uFE_!Vv~FK8IJel`C$j`#53; zd()T^2SE`WQvgv8lc3zqzRd1XtyY`05VSRN%py1|%5kx@gnc_%vM}r*6e$yS zyW3tkn_w}%A{5+eZv+{cO)h4Y$0{9dLJ9Z}p-BPeTts#rc?6Q~}PrN{@9jMIP zOmnf^K)x6EyPqm4SuInoWk)nsk+Ad%8?T9H$6k$3II!ABVD$>`_hYq7=~+N`MC)0e zR=Fm!^DTre`?VS1Z_tI@-@NfW)o4c9lA|1ACC}I+zi`}x$`RouM?Azzo=cB=Q0_O* zdpKKvc#KhGtPZ5?!|*3)?#5u$w3gOZQp(xU&R9J~_4zjo=RUlWl7E4Oc7bsgsO#juHufCo7#CZ=-USQIA# zMSR);Kv;PzJ-*C~lAvcO8klS}rHY51Lv9)4G<}LABH0ede(6LpH4)9$uJKwV<^^9moYQua41L`CLZPXooFlAf8hU$O+?14D*wPHvjb_cma$U?K zkOX<@Kp_^s@KTE&C}Yh~NTZb^DZ&RRTY?E}kJgjDvP3q;(o$B27M>!NApz_;NVgol zZdvS!#^L))WaLskOdKo`hk?WBHx}%0zk_uDQDl6^;R-3wrZpMA1PHU6R%0 
z4&WT?%jBsAGQWDyiRD-CdFw`XkDh;V2?{S1^|H(_RG8(n9LH1c8C)*X}Nt`W^PijT3FOH3lp-U&xp>Ex+1I?Tm(dB zHrMcig&-_T0G(ueDVW&cDHl6@e!J*14fNC{YpMf2AOGl=Ka`uw%Dpzx-4_T2%F2CK z?Foa0a?8a*oRh$7v;E?}T(u3$MQnbXpKUID-Xh-XYqjM*mrU&$pWCit?>Bj{5sQY6 zcGiP2gYlM39NP&?6c=s$);xebeZ5^)`<$$x6~lShc|@&T8Q07*6(&!v8a;||<@`{b z(v}%n?Sdmv)fk`1WW^yza7UT2=F4nhe>&_m=!`52Q=S!j2lAvhOJ%{XoSHE9P8ySY z_nKjFI3M;>$AY&&FcriN#`r)YYyC+5y6^tu1|`8tFuN#k!^Ijs_iUX$+R@WB?>GyZxAhT|qdc}4*vZzpFsRq6u*12Ig zfhKF>4NwtzkDqp&Yv{HWhI;Xw0ys5ig#(f0<3e%BZi8Ez!X@Q!c|}v*L`g|@s~RZd zKqt@^7h^^Yf)UbhG?1(?N}@pY9S(2E-;jE&+(O7}T(CUmirp&jLKAcEdgB-^m_Za5 zVH9J}*{rL=;>YoAV7;@JW$+nQ#_rO}MI{R~=7ugE7+V(%t{XjK&BiFd9@Ar*N)~Sz z0ZsZ{w5AuNT4t~2Xh|(d8?jEnO_lv5$_?sEQs#(?vvPiyGg?=J-A^=qm7|yovU9vP z5@G`y7G*<%mRq1;$Y7u6;~3;hBEwo#NXf>B^byJJW@%Sqdu6*(CY$;B*fX$lQxgl@ zJflfP5EvNY>Uhd4;@Tpa(E?9_CNei-<6iuq;_UF($=uNPz$d}87>)HAQbfjkL-nu7}fIQDo-Q(ZBUG}Xx?d3>e4@Fjeo zH(JE!*NUh0c0PXi6^>-5G2j~SMVaKYGkI6XNH`R5J8dvh=%)mAaCEe(f;mRD2c8CS zGuW<}5kYl_H;moz%u5RAe;A6|JQ*R?8e3M*%?X13|Px-CUL`(0?af_GXNJ(vE`ArTD65(Fc|VW5bV5aO!j@nj1WL<#LSn~ z<3bToI5j5?9vB&rpKnEK94jdSG0KW5RxF4$T7gLmEx}moZQ^aor4UeBT!AeAiU#H0 zrKafI-KTG}s;zZ5&Ii;ELbU|GE`s^rh`opZXJTR^(mOfe&2)Eo%9}fT!Y8i~Us#lX ze$gV4UUUEh>w*eyd#0R>p7Nf`nyREnSFLI_MZPLnGPEDTgk6?Nc|EeH_D zN&p-OVa3uY7M^ODKU#6#q(-+mMTvvJEEbz)gJZD~${uIJC#1rVUxxv1*#*+!F2^eF5o@SdA7H5TL7A$w!ciN&6mId!yoaZjBO$JU7)(_cQIi~bohqEK83gCUF2Dlt&a4i% z#Q32T5Apoz{@xwh&R5yqvBvVM_R;2%jhTQwkcdR}PgQMg9o5++C%gXI0{@&1{eIII* z-~HHi1n|FHxkx;y@Y=2*J&nDsokh7w#%HpcvWraFda1|rP7al9CvHu3E@W9jAC16kO(c{UCx&Q4q@9^;|-C6|2$jv*F99>?z zcLzs|cJ*YlcPEloPfZ}$;g7dO7lx~cs+xPF&L(Sr=Ze9RrCXJ>ny7Jwe9rQ+3&#=( zr#)L%=8d=2?^sklr8-{ha z0|*&w2$fOTj8)3bw{HvT;B{g$92JY&jv6S%2+7-4^Y%Fh^9b(X8Sw`ARc}}Jq3n8N zubG@?>En5jQ-HBEtJlsAWl`k@;myCM>|2bl3blyeipA1zSx4J)kQxOA1I{{E9lR9S zcdH^u3~p+0M}+W=1N-V6Ze{h=Gv>oDh_tZ}GrZ>MJfGW{6*<^>?uBexZ=4IQ&kqa~AUsP_;B_o+0)a|5 z11OxXRyb1)34bv&E;bsNy+QAxuemDo%wtMa?L(DPX%e+Jm=kE&I9fO5ob#sn4MzC4b>Ap zglz$9BeZ@aB_=S=pwCUl#Kf|MVQ7OKvAwIWf8jy|>Wy^+h zE4J_4xqbVtU8tGop_TB388U*?hKa9W!w;62@}-QR+w+GFvp(`K$iIdBY>&&(hmL;U z8v<5LEa1v*GNd=FFov+vp;9(~@yv|P6(DhS&yyas%Cj{G*s;L|;3#I}+`3Dv#A>Iq z6kI&*kk5I`$T8G5PZ3jwO7ONPzKs^_eTPpyo+IBh+w)%a*BB}bxy9wVkQFp+bzAk# zg`n=s`CZz#c>N3SJkO!w=wLGrb2GzQP&5y)P0(h6pQB_$t9deDv+R;wr5Tt@bsbp) zXW(QAHiouRG+OaUuvMiPo3!vKnS}*QX)Kyyd;TH{j;?oBf9|iSxxlNixdbd zSyW3&*3w)|lYK|2i*s>WlQ~@)A;o8@fLKcjbA+{BqqVRx+F)nlU>9z&EJax1rm~u` z=aji={;~?z#lNC8xp$)-MS9hCC@mq`pJ;)r`IqOAEyuCWAzXv^{ty#oEhUxqT(l0p z=DK7ZyJa94)>O}qxNxzK#UnpbD=c>K;5l&@MWGBam88IQ%TqG^`dI4EW?Z}7iZwop z4gX}gDWk*KQHruF7R3%(uMnRxZ;_>?F`BG7c4?g4YU7RN^q`Mfg<27U2ayR#%jg4Z z3eZOmIoqf=zD>zcKy6lstr32f65;f1%P%vc8~z3@|C_bBfPP1biYaA3pFC=S%>A=5 z^t$=NWe$I+WU4VqKlNVSPaUSXxF=T~iMX7Rctol(HP{KWXqlBZN5Y=CK%{%Y*^ ziRBC|8nKs|ohZOctJPZR&{Y~k18vo^zYG#E3AE^4YfAxJN(9bQ zXLl*K{MHaO4cl|;Y$Q3->3v%Xf(qgyLZ)Qx zK{}ZcGjJrD7@9BJ9hiA_8{maq184%Sa*Ps6Gc&g2W@qNl?Dd+rjQIvxDz=O?V{;0c zjWqK$X};nLIP=%H>)mQm0dwSrQJQ5jTY^QUm~&-SFc=O8gH@f~EiFAgEiK(Yn~gd* zRtEyrkw74lX>HAPwYDB!S&BId>`$bA1@;jIJ3QQ(tA-v)(6NkVkh-TGUzLI{qYW|% z7C)b>G`smHDT`GX&rKckl;1WfcfVI=4B4*aM($_yYwH`|5Crr8K{_&fO)9D`#0+6 zzy0mf6&WnLjy!V?f~4_y^6G|j82*N3bn@l=j^)tu?98FDK4fuPpZIJJ2RDg6Y-%cn zdPHzeQ7E40|F8H90PQEz91BQkz);ZLuu#{RrvjSG>N((qJKrT?yX{kf} zY09X*40y?hSzhF=mntK7G59!~_i>;p<+*(yi{wcb=3Rv1wTWd{{7i6s?kG&%_iL2V zBXnu$mJSET=>C824A@Ab>3XcjQr{Bsx4^#I#F4Tz z5-VCy(rD!NVK7qrvIe8IX|{o6rRqNSQQ#1_!N%Uq(AoUXSJ zgidkP!-O^sA(^mpa=CmJ(%6!5B-g|Av@b4~m7Z*qhz@lrH-t86)mrwB0ohIid*8|a z_h4^2peqG6?y|YzU#e8ORk#cbs#WeM0!-1l`UJ$_Cqyp9L*I`&ecWy&9zj|n^a(hh ztt@5KJH(Cy($EzJ3#|GM41Pzf!v3DWf|kRCB|TewIPl9d@2TG^!W(f(4<Fe{xGbAEJYx}1kd5rvwRZ4 
z2H}%>OYaMm*(U+Y!{ zx1$ra+@0N>U~0ZU!DhfSA3cV1)K>rE;5>!ED2RX;43)N`Z zGhk0aIb>2KCF_fPX^MT^1ANK6yXGp%X<)^~4&Sj$==Nmmu$K&vdZv=CjBiw@tUvXr zPn*;)?+McvIja82?d$15q;Iqbj!4RvazCW4dRZOnbz*Ss)OSRp-UI<+aYUm02*RcT zjz!8DqjMhH4G39GsY6&$LJ5S*#>Q@dU@cvYA>_kq{fAJg2>C&zOMsV5?C^VORRk(b z0)_Ii1cIr)VkKa$4su}IbPsrg7NhDo6f<^qwE93@f8x9CROxU{U*b~_luet4d9UY9 zi#*7IXTF@hsAvJUqr<(HUY5s>iU2|(o5UNLQ$jmNGHa0tN78QQSPU0c!H{C!bMWwm zb${+)yDlA?uwhslB7r1B)(lnkGrk4V?9*l*wfU@?ODz`5aWj+Jd{)n<{+IE+X?$$^ zy&)N|#rKYZTBg;yi%C(3#|Qg48_Gyk%jM~&F_sGpk0i1MCX9=Z#c=U37&H=i^N+{} zg7%786NBtTQZ@|Jnj?Hf7Lvm+1;iT*mV8rV%k|bPv!{3O`K_c_WRob*me}qhzo`Z9 zxWQfXSp$c~=-3vE4GvRY%!E#bJ_4_0$UVL|KRGcpP%bU3zwX2~V#36$5ZVINWD=Kb z3K3Z;ndo-U?hHZ_zKA3+1$`K3p|d%OBCr`I6~TM%;+(TxHgc$7eeuAKriy%w#{RcceJ+g!5wtiJ3j%dHeQ7i>POF506jFAr-8vyGdV7K=v;W5a{(X|G0)8Rk=? zU{{AaTVMvL1teQay$KxG(tR9PEGbCR03UMEMAa6sC~OzOK=g*vk@EpJ97k|Nx*_B0 zJ)vq8H8f?eTx@DZ1L*O*0dV-?AQ0NuI zqfqHb@jTj&{^l(BYj(oEqZabav=bi#S9Fdvca*-ETc0_7<789mzNVa-%bl9$A7a_= zmS|@6{->|p+TVF%mVZjG-v5lFx8P6emFusVETuX+i?_exhK4gw4-O5iyqUjj$*VmJz8Du>4sFKKZn4}WJ%c27YxpZ2S!E=;HXmp8#rDZNAxo0W{9>TBCLIjWM;aomcE)1B8fhFkf ziKZh=uBx|8#3LznBCu?`H@VY+hj;J}Qm2+?a2}8^!+@!nk$wX-u$CHg(#6A2+G1Gx zzugZy1sMGDFk#R4Lx)M7`+=so|NDuBhSdFTrxX@qVrX+yd?+zwG(%sqRcMA0D*W^U zU~(S%kT9yRr#fmv8mWF94ZZ}Xs7&bjNi zyZd{uz1cd-PNp^mf3H%-xmfGct8|*DKD#CJRiS&)Q~$ZpXD)<3jN-%45yW^KQ;?7s z)CF6eGB$;)gTaS+tvFPAi#%;aN)uRd862iEQSWIyMY~XkbvbJp4uN_9VU#zbj0$av z=su8ML$C=^mQ9tztm0y?OQ%9H73u@bP*)DO(?xMxk1b*H_WYPSap}UT{Nl|kE4M7> zoBNK8%oI}j$)T-gm^2Q|kLUYZBNv?iygIr0A?K!%HIA%Y8yT1#?M^lh47YWr8zX3+ zw#87Bo{7QERIEMU+cLQdy+dY=OU+3-w4kmqYP>LiczkShpsy>}(xf#L;j7dsPxs(3 zx>X!W7iefyKoMR9~AU0L$Zh^|%;WuPc|E*p?|kweCv&g~5rV6ZR= zu2x$4pmcCY(T2|M2woIPi%}#kLLWwBT?zK1rVP+}ux-@ApnCO&8h}Jw!8eT{m)(5L z8la*vASOFo>+SKQGpRKfjs*b11xhB_g{Hh_GG+=-mX`hz+RX3{XP&dftMsYe3_h>C z_dEU--}{*w*Hw*^HSRl#$TYYJuASqtYhL@7aJ%g&E7J79TfdUE^_yA`BcjV2|MmK1-U>JBDfNBdCQm2G#zi3!w6M6=8k zfTuNtWb6cZW&%7@5~{4v4P+l?K%_g4vT%6wnJ_@Izqk*807R%zp6I4%j$7KluydfX zForEaUPIh->?NFj9NP$xELuV{MjEr!^KPjF5(nV zPvY%V%4(cySr)?{USQMTug&hS-M&R53AihFK{Dzgt%3gfj=81yFZaMNKP`O@Cqft1 zoi6$)T)h6&$>YbC7mv&oM$7$~l7&y|xo2L*6Ghb!pBQ&y4JQ!%L6yJP0ug;p+v(K3S%D3E$P3(k4EY&l-&! 
zcY_i)*7d+%-YJ&U>@#M7;GPm;0FdFRc{#yNlP#Id6*oTfKC=vy}80Vl?s znMLcM8I_EVjXII!Dk@%pF-c@mB_rd^(gBmA2M-q&aMA)PVBF%i7_UUnal>oR?-3Y) zNTWF3zqBAUI@ZP(0Ac#yK*`3Mli^S z9Y^~uQOofLY?CkY!?Snn0CGhh(azprZSiZcclX-u+UKwJOun7#DC*sncHqiRiRwd@ z9q~#J?VO;XsTrEmJ^n#cYQ*gbQdC*x0O{%atq=h@*3eLhR|qwe*P@%o7Gy;#pbB$& ze=w`4E7k4Zz7Hm*K`u~5<`p5|$*I0%gl#>fR$1W+41UlS0PaJg=iPzDvQyrc{;pfk{e zCxZ)YKGxkM=-g1YHT3XglT3y@gw;VvqdhEtMBfDT!l%4!XhLKJP1EsX0*Pj#VTMvD z;+MZk=&&e1{5sbI+`4)1wpD=h|AstLNq~_72`YdhXWt&Kr(Z^>1Cf zp|gMJ*0JKobhdSTWBKjsH+!bn1_#z=dLC=+=ufu|bhUJ~x6C3Wv@|COJUTEi%0907 z*x=f9cVp{XreS2Dvn!JuUmEBwj$zy-dIxW)4+EFyLvL~$$3}bM!)|IonVZ4b8Je73 zu&&f>Ug{Z`SwBgsPyU2H;xhP7^D6DWTCBYSyaJd>w5VUruvFa_&O1h1z%U#rcsQG$ zz3Ig04V;2Cv)r3V^e)dVobK)47&es)*Zai|i)RkCKDqeJv*R))YiN06baZ2Rs42D9 z)Syh&g7le?I)~`wnLF_1KjlvabY!Pspz{Tjm=SRTUQv+w zCZie_ zDi_GkL0aAXpmyjU;NC<7_w%u~M?G)8)z9q=`{>iyp57xKf7LeW5ORdR;r21wwsv%( zI5Ul&kiDHlT|>TIYV&xHBhe(jtWW|=9VMQKQXMI`>pIXzUK&Ayy#fCco;DJkT(uH_ z62s#t@FsV)08x09u>UTn46#3s2dONE=n;MgBC|q8t zG8hF9LLoT3&R(zy-RhXs0#AVBH0!Ed%Jl*NLF7S&*>*mJN^l$Q!3ad;=OBv)&moHj zPaxX`9q@pbjQDAQ{O}@vjN)Wu7CKwJJpgHZY;pN?CP#6*`H4An_$xgxpFzIQ-Oyu( z{sn&zNWZ}MfRwBKJ+A&GzDEJyBZdh!nDGe(v<005ZKoNVeSL^RL1kG|!QOC)272KD zTny79a4}(c@Wi0v#T2TZXGg$jfj&w;?#UdRd#E25StDf|%$tZJZz3upkGY5X9sKZ9 zF6IKs4=|Yhl#9AcEhS469Iq^LCvSB3@GeW6UIC0nES1zg?K?c=B6km$sO* zNAvMPbpz)?45AxMu)@#glMOM^nYMuMIFv5repz4@!=u0keoRZjHIy$6^a(Ui zf(`Q-=pE$bF_8dNa5Zp#_}LQkW>ML|2Qk0vE*68C>1FH*BYWG?8{Iv<%U(zwX6ciR z>aBEw{d;{^Rcvqbjg~EdFQ5;tH!o@0IpU&%W_oJk5S#f7)T*GN2kSyX#S%ke-;jBM zJB>@v8XGczBt|qqyh>L8gLfFgGTb<4$fTedieDJvAsvIO28qMUNOdUsb1JqdyI9d+DzmrGQ-Q;5HdQohaO0gd;yGcV%v5S=no@@sZ z4g85x?3Rg%lK9EgqWYkTRM%kB?lsu1#mhRek&HO_;>blQ<}k^o16e6s%)Ki5tw^t2 zK3jVVR_V{#owRhfdSYV!(z()48r%GvBGXul^S&)=aSoxp2A8f79Txd)+rmOGw22{7 zj-&08VgLC+MUG5RG?q3b^$sd`VEizH$0q3Pq;!Wi_3JBV`kZZ>xykiL_uzA;O_D5r zqhfT=#LC@g#>|`^eB#>z)9J(bS9VTR2e!*_PeMz$Cg-GUeO|R?tq(i7K);}(*!*dn(c~4XOj`2EuXw3JQdRA@}X> z-!n+Nc0ei;Vc!9<7+T@{K-LI7xlN4_<>AT73LhyT^(uqP1)vpGWzG;2#@TJLQI${S zz>1CJ7V5J0@@ZvNm524`8{W44P1TNaKX*Ger~L@u^z={G8r`!^t3f@xh_^@>taLH83OOERoG+|(%rsw zKb1*>A@!8f?FhHZfd6mXBSw`PTl#_RkV0hNTIkJ~1yGRQZr!1?Zo0EmSLvI`EG$L} zatWH@v}K$enuAhBs~M<>!ecT=;^~!_DW_(o57ocyZ>^|L}hJllAq0 zY?fGJ2d1L@vDvWx>Stq(f6{;S$Z}7-^o?lapR^wCom=jX=epa|(>J-}$8t@_N4wjp3*9i? 
zbo|LfXY!@bW&0M+PmG_*tL26U{P_6ry~n8rwKlge4c~i=KlWzu{T$XKs?UhN*rEP_ z(!C>VCJ#VMLJUVNRG#+~oBX#Rx!cL2V;FvEJUcPCv~hfN$T(7~7U&v%WSd2HAOU<2 zQtrXD6S9`+8Vm6vup9V@h=;6qvMwT{7%cg#fW6Y$njRx3!uoNHqz$a+YP^#lzAxf9 z*2naB46erz2DIlz#<_-r?{2pd1Y`XYd)|h-&kcwTLOvv!%oPI;P>=OpsL?-IZdoud zdBdM_aOs?S-ofJq`^%Jd6W=jjfFO5{7YHS4yf6mIF7blvaiM%CKlX|jm0)&@7nSS5 zdv=Exl>nIMjQOy}J@W`&tY=+IP8{i^<3V>0Bwh{r}wvcK< z%qooz$KfnBgMi-=%^o9Q8`K7E-2oK~X75P9v+Y(&m4VJ*hH(M4#PAL=4{EK(E8#d} zw=+hI(XP76hq+e1W$+N)kefANVT7x}MaqCk_OyoNW6K653Em&q-&>+(EU#uS0s=x& zLnIEjxCan_XEzmuXG3NC+mw4y+p$Qbv@29q?7 z-Kx7tIxx62@Bk=ChA2xd*lxABXBF$~BmidaqR* zJxy`E9J{LZ;MNL(77h(#w$fKTTc}?>;6mj^Z>s)I%xCTn;F+yX9qH?W#;ctfK_uf; z3}9I_+Q_-LT2~qKp5ECGzx^Q<||Q4rxY40q|%%!9lPw zhGWoapGk*|hp~pipa?NeW;Fq?EE??o^_bGUpBKxOL-f>}lo-cQPc@tgt9Mmi^NB|u zxmmr9yqu{o?dN0twDD3peD7J83PIz`oSXxUk4pWK)mn(b>^ zIX$;A34;lJiz~VhQ)&* zGZ`5lXpgB*kKs8F8rtCP|0?e0HCxVj!oA>}MGH1L(}MJakhsDsC_#|F$b&UzFBfRc zua}Cf&Q;|XJC%9E{Hh&S^XfDBLg~;wLL2E&y16lqsUV)^-Upwblfuh6#7?V4+yxJ) zQK2f_EmS`sC#45*@j*VXXXy^InwP^8?Eumx^~obEUA=9+4NW5*Q_FX>pD5&pqT$(S zY;NlIS)AVoLA&1)n>QZ%$1QqoreSLeUzRIE3mzZC%YPHV@E%?!t~Zp&57;Lx9|GY| z{+!w|Y=ZMz$I-_#tl(+E@FPaq7?_Y&ocz=wS>pKCFx?$OGgwX&0JGHNAH4j`*WZVK z88ji)Gw<~~&L#L=GNF}UZEc2%WjH7q9GoIbHS-AS&P5~o>U32-tC9)6XPMGyQXBax z38#U-gf!!OIRiI=xxnyKhW?W~Fg4lLnM^J%OrDrJF+A9Lxa)8-m+WZ9tajj$V>6ys zXp%=92wWT(MIiASrTiFM%^!7m6=6jNLeYN!wAO$qtEhMGNxK%kK*I?MMb?6Xm2u9+ z%?Cgbl%9M`S{cp){o)u&UGzrU4#(*z4!HcPX5K#9&~f%`N5gZEy{UUNmrCVEyZg}DnojSS^GiAJ_} zEsOZ0lduQJAf4fLv%#Py`r153C3ptf@sfmK`>s?hgN7B>>Gy%_s>RG=L`crsW{)9V z)Lq#Xup>w3o2~FxKf9&Zb?RiU@!~DZH?+5%KY9JLma|W8|)Sig$vk_HH^7y2@NWZYf>s8btkL^fI%thIR$ZlCS}?~r_y>TEgTQ(VV$!-Z`r z|MiWzqepX%&p7t##lz{`iL=WwL-6LalJF;+(#O*m?^PYevF?e67vK9HL+|FZ2HlgV zhR;DqsL9JgZ#&`T8I$2IH;`E#L7h=DE_EblqEFWFW}#|tCy4> zUFVqRW9l`>WZ1KV`MLkwNjv{{Xf!kz?s7ZF#>VEx=4NN6rw&hygDdrRx3#1a;!zj` zMXX@3w{v8&FwhiDgjvsn>}{36tki@8l}Q6_pfp-(bYMa%NQ4BYRI47FI|Kly03hlE zKrfPV0GgLIp+OiJ9=kAY>-e~(e77^b%t7TA-I z^TsiL79-(de&n(u8KZ?9hGWA_%nmQb=Lbd>CuWajij9qj(W9nkZ1M2SO6G7|^P&8q zes$?qXYlaTZl4%%ZgoD8X$eP%h6bkN(cVbBGnZ`-NBahbCZh4)XsqKyANsaUa&3=@56R3eM6`P{Pb0BD&r07upI4hgE!fT)X<1M!01}vk zRQ9-3SjsKs;F~POg8*IDmSiMk%<{*a##8zGSRSE*w6z-u0@^_VmjT%C| zn4^%gTn1A}6~0i2*^J6Y;}}nri)PiFx>|Z`)_Kk5Te9lGl2UI~_usbJwBEFNo4TLm zy@q#9)OZ)~gRjZ(g=sv4!svnuPv8X)W;fsBye3q9Mhr?v;CnefSTA65f-dYC942O# zmmo0NmOh$VJH$7LB%siy;Al_5{zDoB{O^d}h!n6T3cw<7^L}5N8*A*a3LLObJ8dff zCu$Tnh;U&&%rna&ORthJlI_B<2$E=jpG4Q3E#4FmWB6{ckt+%)Y;T%t@C~l{-nDQO z)tmxMOEy0nJS_4on}sCf#Q23JPtK{Y8QDrr-PKxXvCiks zyNw6fd2=LEICB3|>6H3pfA90pU1CmxW9cFp8g$SVy}!3Jhh0EeG)sL9I;t2C*m6FF zDcD^oO?R?`N@XC;fJbNz!h}h-Jp?>9?zL`fm|*lG2RX4W>57Hh&@1~en%RX)7*L*3 zQIGP3En#8E7}3w{ihzBi@}>YxP#mPG2h$7}t^pe{dvBYyKwHD|K)35&gN<4=j@kf3 zJ=ArcT@zywsc_!9d4E1r6HgJ_Im`R#-4m2TTi0 z-}vu#3WtPnY$tFKdr8tU(i3oEB$@(qM=F58Wr8QGU4nLFp}2e}Hl)}77Jm5Hdr`yl zT_SOO#ljX)wNs=vRExM92enK0VE1S7l?4vQ6}lh{m=2Lb!1qR%Xuuzc39_uil-E;hTuG{-`-WE>|Bo7rI&A0cbR*J8S=MD`H z9?Hed=Ria6!o9L#G2cKZA;%KK#v02>yV{!v*xvL@Qh2(=AO1 z;41OB^#ipwkPD_zXydT1XBc^Kkt`4(iBViS1wcHGbOkhZG&UeA44|mp!m#)1vTkDP znQCx&nk#8gqE5FCY~Yy3)#1fsRsyZz>#Ko53n32$%dQW$<{SlU5kGw^prKHsYKS0Z z6e7V-e@j8>NJ*10Kl`BOODq;2?9Sb5x@W;`+8_(X;|L0RCNUbw4#FZq+%(Y81b7WI zd11|s_8qhE%Y7|BhXoLRiSckqtUQ+FQii@Iy-z3c9C z&W$xUuccc1mTx$+e%tzy8`ugCv*5@H=3aH# zIBcO6{;M|lsZdh`zdBsRT!$Zn&Y3X7YI7A&0$~`10?J^G=&^ZOOl~cIp(AXWKx{4G z89>zSPll$l`PuxPx88auKC)9oD=RDN{qg8(Ec)zcJ?p+`Y&9BpFI+eYXt2%xjs9wq z#=p^DodGXw%BzzW|6ij&Cv*mQJ1O%6UG<3WX}`Ks812c&-Gs}m54yH(*3ePrWF3Pw zc6`u@h6fPtim)6Ce~HvTj6PzgDX8+^>?5e?{MpMd*PQ+2HV{J4taRQWJdqMWV_mg| 

{{ metric }} + +

+{% endfor %} + diff --git a/teuthology/task/pcp.py b/teuthology/task/pcp.py new file mode 100644 index 0000000000..80458a1317 --- /dev/null +++ b/teuthology/task/pcp.py @@ -0,0 +1,335 @@ +# maybe run pcp role? +import datetime +import dateutil.tz +import jinja2 +import logging +import os +import requests +import time + +from teuthology.util.compat import urljoin, urlencode + +from teuthology.config import config as teuth_config +from teuthology.orchestra import run + +from teuthology import misc +from teuthology.task import Task + +log = logging.getLogger(__name__) + + +# Because PCP output is nonessential, set a timeout to avoid stalling +# tests if the server does not respond promptly. +GRAPHITE_DOWNLOAD_TIMEOUT = 60 + + +class PCPDataSource(object): + def __init__(self, hosts, time_from, time_until='now'): + self.hosts = hosts + self.time_from = time_from + self.time_until = time_until + + +class PCPArchive(PCPDataSource): + archive_base_path = '/var/log/pcp/pmlogger' + archive_file_extensions = ('0', 'index', 'meta') + + def get_archive_input_dir(self, host): + return os.path.join( + self.archive_base_path, + host, + ) + + def get_pmlogextract_cmd(self, host): + cmd = [ + 'pmlogextract', + '-S', self._format_time(self.time_from), + '-T', self._format_time(self.time_until), + run.Raw(os.path.join( + self.get_archive_input_dir(host), + '*.0')), + ] + return cmd + + @staticmethod + def _format_time(seconds): + if isinstance(seconds, str): + return seconds + return "@ %s" % time.asctime(time.gmtime(seconds)) + + +class PCPGrapher(PCPDataSource): + _endpoint = '/' + + def __init__(self, hosts, time_from, time_until='now'): + super(PCPGrapher, self).__init__(hosts, time_from, time_until) + self.base_url = urljoin( + teuth_config.pcp_host, + self._endpoint) + + +class GrafanaGrapher(PCPGrapher): + _endpoint = '/grafana/index.html#/dashboard/script/index.js' + + def __init__(self, hosts, time_from, time_until='now', job_id=None): + super(GrafanaGrapher, self).__init__(hosts, time_from, time_until) + self.job_id = job_id + + def build_graph_url(self): + config = dict( + hosts=','.join(self.hosts), + time_from=self._format_time(self.time_from), + ) + if self.time_until: + config['time_to'] = self._format_time(self.time_until) + args = urlencode(config) + template = "{base_url}?{args}" + return template.format(base_url=self.base_url, args=args) + + @staticmethod + def _format_time(seconds): + if isinstance(seconds, str): + return seconds + seconds = int(seconds) + dt = datetime.datetime.fromtimestamp(seconds, dateutil.tz.tzutc()) + return dt.strftime('%Y-%m-%dT%H:%M:%S') + + +class GraphiteGrapher(PCPGrapher): + metrics = [ + 'kernel.all.load.1 minute', + 'mem.util.free', + 'mem.util.used', + 'network.interface.*.bytes.*', + 'disk.all.read_bytes', + 'disk.all.write_bytes', + ] + + graph_defaults = dict( + width='1200', + height='300', + hideLegend='false', + format='png', + ) + _endpoint = '/graphite/render' + + def __init__(self, hosts, time_from, time_until='now', dest_dir=None, + job_id=None): + super(GraphiteGrapher, self).__init__(hosts, time_from, time_until) + self.dest_dir = dest_dir + self.job_id = job_id + + def build_graph_urls(self): + if not hasattr(self, 'graphs'): + self.graphs = dict() + for metric in self.metrics: + metric_dict = self.graphs.get(metric, dict()) + metric_dict['url'] = self.get_graph_url(metric) + self.graphs[metric] = metric_dict + + def _check_dest_dir(self): + if not self.dest_dir: + raise RuntimeError("Must provide a dest_dir!") + + def write_html(self, 
mode='dynamic'): + self._check_dest_dir() + generated_html = self.generate_html(mode=mode) + html_path = os.path.join(self.dest_dir, 'pcp.html') + with open(html_path, 'w') as f: + f.write(generated_html) + + def generate_html(self, mode='dynamic'): + self.build_graph_urls() + cwd = os.path.dirname(__file__) + loader = jinja2.loaders.FileSystemLoader(cwd) + env = jinja2.Environment(loader=loader) + template = env.get_template('pcp.j2') + data = template.render( + job_id=self.job_id, + graphs=self.graphs, + mode=mode, + ) + return data + + def download_graphs(self): + self._check_dest_dir() + self.build_graph_urls() + for metric in self.graphs.keys(): + url = self.graphs[metric]['url'] + filename = self._sanitize_metric_name(metric) + '.png' + self.graphs[metric]['file'] = graph_path = os.path.join( + self.dest_dir, + filename, + ) + resp = requests.get(url, timeout=GRAPHITE_DOWNLOAD_TIMEOUT) + if not resp.ok: + log.warning( + "Graph download failed with error %s %s: %s", + resp.status_code, + resp.reason, + url, + ) + continue + with open(graph_path, 'wb') as f: + f.write(resp.content) + + def get_graph_url(self, metric): + config = dict(self.graph_defaults) + config.update({ + 'from': self.time_from, + 'until': self.time_until, + # urlencode with doseq=True encodes each item as a separate + # 'target=' arg + 'target': self.get_target_globs(metric), + }) + args = urlencode(config, doseq=True) + template = "{base_url}?{args}" + return template.format(base_url=self.base_url, args=args) + + def get_target_globs(self, metric=''): + globs = ['*{}*'.format(host) for host in self.hosts] + if metric: + globs = ['{}.{}'.format(glob, metric) for glob in globs] + return globs + + @staticmethod + def _sanitize_metric_name(metric): + result = metric + replacements = [ + (' ', '_'), + ('*', '_all_'), + ] + for rep in replacements: + result = result.replace(rep[0], rep[1]) + return result + + +class PCP(Task): + """ + Collects performance data using PCP during a job. 
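+
+    A typical yaml stanza might look like this (illustrative; the values
+    shown are the defaults read in __init__ below, and the task is a no-op
+    unless pcp_host is set in the teuthology config):
+
+        tasks:
+        - pcp:
+            graphite: true
+            grafana: true
+            fetch_archives: false
+        - ceph:
+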
+ + Configuration options include: + ``graphite``: Whether to render PNG graphs using Graphite (default: + True) + ``grafana``: Whether to build (and submit to paddles) a link to a + dynamic Grafana dashboard containing graphs of performance data + (default: True) + ``fetch_archives``: Whether to assemble and ship a raw PCP archive + containing performance data to the job's output archive (default: + False) + """ + enabled = True + + def __init__(self, ctx, config): + super(PCP, self).__init__(ctx, config) + if teuth_config.get('pcp_host') is None: + self.enabled = False + self.log = log + self.job_id = self.ctx.config.get('job_id') + # until the job stops, we may want to render graphs reflecting the most + # current data + self.stop_time = 'now' + self.use_graphite = self.config.get('graphite', True) + self.use_grafana = self.config.get('grafana', True) + # fetch_archives defaults to False for now because of various bugs in + # pmlogextract + self.fetch_archives = self.config.get('fetch_archives', False) + + def setup(self): + if not self.enabled: + return + super(PCP, self).setup() + self.start_time = int(time.time()) + log.debug("start_time: %s", self.start_time) + self.setup_collectors() + + def setup_collectors(self): + log.debug("cluster: %s", self.cluster) + hosts = [rem.shortname for rem in self.cluster.remotes.keys()] + self.setup_grafana(hosts) + self.setup_graphite(hosts) + self.setup_archive(hosts) + + def setup_grafana(self, hosts): + if self.use_grafana: + self.grafana = GrafanaGrapher( + hosts=hosts, + time_from=self.start_time, + time_until=self.stop_time, + job_id=self.job_id, + ) + + def setup_graphite(self, hosts): + if not getattr(self.ctx, 'archive', None): + self.use_graphite = False + if self.use_graphite: + out_dir = os.path.join( + self.ctx.archive, + 'pcp', + 'graphite', + ) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + self.graphite = GraphiteGrapher( + hosts=hosts, + time_from=self.start_time, + time_until=self.stop_time, + dest_dir=out_dir, + job_id=self.job_id, + ) + + def setup_archive(self, hosts): + if not getattr(self.ctx, 'archive', None): + self.fetch_archives = False + if self.fetch_archives: + self.archiver = PCPArchive( + hosts=hosts, + time_from=self.start_time, + time_until=self.stop_time, + ) + + def begin(self): + if not self.enabled: + return + if self.use_grafana: + log.info( + "PCP+Grafana dashboard: %s", + self.grafana.build_graph_url(), + ) + if self.use_graphite: + self.graphite.write_html() + + def end(self): + if not self.enabled: + return + self.stop_time = int(time.time()) + self.setup_collectors() + log.debug("stop_time: %s", self.stop_time) + if self.use_grafana: + grafana_url = self.grafana.build_graph_url() + log.info( + "PCP+Grafana dashboard: %s", + grafana_url, + ) + if hasattr(self.ctx, 'summary'): + self.ctx.summary['pcp_grafana_url'] = grafana_url + if self.use_graphite: + try: + self.graphite.download_graphs() + self.graphite.write_html(mode='static') + except (requests.ConnectionError, requests.ReadTimeout): + log.exception("Downloading graphs failed!") + self.graphite.write_html() + if self.fetch_archives: + for remote in self.cluster.remotes.keys(): + log.info("Copying PCP data into archive...") + cmd = self.archiver.get_pmlogextract_cmd(remote.shortname) + archive_out_path = os.path.join( + misc.get_testdir(), + 'pcp_archive_%s' % remote.shortname, + ) + cmd.append(archive_out_path) + remote.run(args=cmd) + + +task = PCP diff --git a/teuthology/task/pexec.py b/teuthology/task/pexec.py new file mode 100644 
index 0000000000..4d18d27193 --- /dev/null +++ b/teuthology/task/pexec.py @@ -0,0 +1,149 @@ +""" +Handle parallel execution on remote hosts +""" +import logging + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.orchestra import run as tor + +log = logging.getLogger(__name__) + +from gevent import queue as queue +from gevent import event as event + +def _init_barrier(barrier_queue, remote): + """current just queues a remote host""" + barrier_queue.put(remote) + +def _do_barrier(barrier, barrier_queue, remote): + """special case for barrier""" + barrier_queue.get() + if barrier_queue.empty(): + barrier.set() + barrier.clear() + else: + barrier.wait() + + barrier_queue.put(remote) + if barrier_queue.full(): + barrier.set() + barrier.clear() + else: + barrier.wait() + +def _exec_host(barrier, barrier_queue, remote, sudo, testdir, ls): + """Execute command remotely""" + log.info('Running commands on host %s', remote.name) + args = [ + 'TESTDIR={tdir}'.format(tdir=testdir), + 'bash', + '-s' + ] + if sudo: + args.insert(0, 'sudo') + + r = remote.run( args=args, stdin=tor.PIPE, wait=False) + r.stdin.writelines(['set -e\n']) + r.stdin.flush() + for l in ls: + l.replace('$TESTDIR', testdir) + if l == "barrier": + _do_barrier(barrier, barrier_queue, remote) + continue + + r.stdin.writelines([l, '\n']) + r.stdin.flush() + r.stdin.writelines(['\n']) + r.stdin.flush() + r.stdin.close() + tor.wait([r]) + +def _generate_remotes(ctx, config): + """Return remote roles and the type of role specified in config""" + if 'all' in config and len(config) == 1: + ls = config['all'] + for remote in ctx.cluster.remotes.keys(): + yield (remote, ls) + elif 'clients' in config: + ls = config['clients'] + for role in teuthology.all_roles_of_type(ctx.cluster, 'client'): + (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.keys() + yield (remote, ls) + del config['clients'] + for role, ls in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + yield (remote, ls) + else: + for role, ls in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + yield (remote, ls) + +def task(ctx, config): + """ + Execute commands on multiple hosts in parallel + + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - pexec: + client.0: + - while true; do echo foo >> bar; done + client.1: + - sleep 1 + - tail -f bar + - interactive: + + Execute commands on all hosts in the cluster in parallel. This + is useful if there are many hosts and you want to run the same + command on all: + + tasks: + - pexec: + all: + - grep FAIL /var/log/ceph/* + + Or if you want to run in parallel on all clients: + + tasks: + - pexec: + clients: + - dd if=/dev/zero of={testdir}/mnt.* count=1024 bs=1024 + + You can also ensure that parallel commands are synchronized with the + special 'barrier' statement: + + tasks: + - pexec: + clients: + - cd {testdir}/mnt.* + - while true; do + - barrier + - dd if=/dev/zero of=./foo count=1024 bs=1024 + - done + + The above writes to the file foo on all clients over and over, but ensures that + all clients perform each write command in sync. If one client takes longer to + write, all the other clients will wait. 
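+
+    The task also honors a top-level 'sudo' flag, which prefixes every
+    command with sudo (illustrative sketch; see the config handling at the
+    start of the task body below):
+
+        tasks:
+        - pexec:
+            sudo: true
+            all:
+              - grep FAIL /var/log/ceph/*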
+ + """ + log.info('Executing custom commands...') + assert isinstance(config, dict), "task pexec got invalid config" + + sudo = False + if 'sudo' in config: + sudo = config['sudo'] + del config['sudo'] + + testdir = teuthology.get_testdir(ctx) + + remotes = list(_generate_remotes(ctx, config)) + count = len(remotes) + barrier_queue = queue.Queue(count) + barrier = event.Event() + + for remote in remotes: + _init_barrier(barrier_queue, remote[0]) + with parallel() as p: + for remote in remotes: + p.spawn(_exec_host, barrier, barrier_queue, remote[0], sudo, testdir, remote[1]) diff --git a/teuthology/task/print.py b/teuthology/task/print.py new file mode 100644 index 0000000000..6594c16819 --- /dev/null +++ b/teuthology/task/print.py @@ -0,0 +1,25 @@ +""" +Print task + +A task that logs whatever is given to it as an argument. Can be used +like any other task (under sequential, etc...).j + +For example, the following would cause the strings "String" and "Another +string" to appear in the teuthology.log before and after the chef task +runs, respectively. + +tasks: +- print: "String" +- chef: null +- print: "Another String" +""" + +import logging + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Print out config argument in teuthology log/output + """ + log.info('{config}'.format(config=config)) diff --git a/teuthology/task/proc_thrasher.py b/teuthology/task/proc_thrasher.py new file mode 100644 index 0000000000..c01911c5a8 --- /dev/null +++ b/teuthology/task/proc_thrasher.py @@ -0,0 +1,80 @@ +""" +Process thrasher +""" +import logging +import gevent +import random +import time + +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +class ProcThrasher: + """ Kills and restarts some number of the specified process on the specified + remote + """ + def __init__(self, config, remote, *proc_args, **proc_kwargs): + self.proc_kwargs = proc_kwargs + self.proc_args = proc_args + self.config = config + self.greenlet = None + self.logger = proc_kwargs.get("logger", log.getChild('proc_thrasher')) + self.remote = remote + + # config: + self.num_procs = self.config.get("num_procs", 5) + self.rest_period = self.config.get("rest_period", 100) # seconds + self.run_time = self.config.get("run_time", 1000) # seconds + + def log(self, msg): + """ + Local log wrapper + """ + self.logger.info(msg) + + def start(self): + """ + Start thrasher. This also makes sure that the greenlet interface + is used. + """ + if self.greenlet is not None: + return + self.greenlet = gevent.Greenlet(self.loop) + self.greenlet.start() + + def join(self): + """ + Local join + """ + self.greenlet.join() + + def loop(self): + """ + Thrashing loop -- loops at time intervals. Inside that loop, the + code loops through the individual procs, creating new procs. 
+ """ + time_started = time.time() + procs = [] + self.log("Starting") + while time_started + self.run_time > time.time(): + if len(procs) > 0: + self.log("Killing proc") + proc = random.choice(procs) + procs.remove(proc) + proc.stdin.close() + self.log("About to wait") + run.wait([proc]) + self.log("Killed proc") + + while len(procs) < self.num_procs: + self.log("Creating proc " + str(len(procs) + 1)) + self.log("args are " + str(self.proc_args) + " kwargs: " + str(self.proc_kwargs)) + procs.append(self.remote.run( + *self.proc_args, + ** self.proc_kwargs)) + self.log("About to sleep") + time.sleep(self.rest_period) + self.log("Just woke") + + run.wait(procs) diff --git a/teuthology/task/selinux.py b/teuthology/task/selinux.py new file mode 100644 index 0000000000..de4314822d --- /dev/null +++ b/teuthology/task/selinux.py @@ -0,0 +1,217 @@ +import logging +import os + +from io import StringIO + +from teuthology.exceptions import SELinuxError +from teuthology.misc import get_archive_dir +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra import run + +from teuthology.task import Task + +log = logging.getLogger(__name__) + + +class SELinux(Task): + """ + A task to set the SELinux mode during test execution. Note that SELinux + must first be enabled and the filesystem must have been labeled. + + On teardown, also checks the audit log for any denials. + By default selinux will ignore few known denials(listed below). The test + will fail for any other denials seen in audit.log. For the test not to + fail for other denials one can add the overrides with appropriate escapes + overrides: + selinux: + allowlist: + - 'name="cephtest"' + - 'dmidecode' + - 'comm="logrotate"' + - 'comm="idontcare"' + + Known denials which are ignored: + comm="dmidecode" + chronyd.service + name="cephtest" + + + Automatically skips hosts running non-RPM-based OSes. 
+ """ + def __init__(self, ctx, config): + super(SELinux, self).__init__(ctx, config) + self.log = log + self.mode = self.config.get('mode', 'permissive') + + def filter_hosts(self): + """ + Exclude any non-RPM-based hosts, and any downburst VMs + """ + super(SELinux, self).filter_hosts() + new_cluster = Cluster() + for (remote, roles) in self.cluster.remotes.items(): + if remote.is_vm: + msg = "Excluding {host}: VMs are not yet supported" + log.info(msg.format(host=remote.shortname)) + elif remote.is_container: + msg = "Excluding {host}: containers are not yet supported" + log.info(msg.format(host=remote.shortname)) + elif remote.os.name in ['opensuse', 'sle']: + msg = "Excluding {host}: \ + SELinux is not supported for '{os}' os_type yet" + log.info(msg.format(host=remote.shortname, os=remote.os.name)) + elif remote.os.package_type == 'rpm': + new_cluster.add(remote, roles) + else: + msg = "Excluding {host}: OS '{os}' does not support SELinux" + log.debug(msg.format(host=remote.shortname, os=remote.os.name)) + self.cluster = new_cluster + return self.cluster + + def setup(self): + super(SELinux, self).setup() + self.rotate_log() + self.old_modes = self.get_modes() + self.old_denials = self.get_denials() + self.set_mode() + + def rotate_log(self): + self.cluster.run(args="sudo service auditd rotate") + + def get_modes(self): + """ + Get the current SELinux mode from each host so that we can restore + during teardown + """ + + log.debug("Getting current SELinux state") + modes = dict() + for remote in self.cluster.remotes.keys(): + result = remote.run( + args=['/usr/sbin/getenforce'], + stdout=StringIO(), + ) + modes[remote.name] = result.stdout.getvalue().strip().lower() + log.debug("Existing SELinux modes: %s", modes) + return modes + + def set_mode(self): + """ + Set the requested SELinux mode + """ + log.info("Putting SELinux into %s mode", self.mode) + for remote in self.cluster.remotes.keys(): + mode = self.old_modes[remote.name] + if mode == "Disabled" or mode == "disabled": + continue + remote.run( + args=['sudo', '/usr/sbin/setenforce', self.mode], + ) + + def get_denials(self): + """ + Look for denials in the audit log + """ + all_denials = dict() + # dmidecode issue: + # https://bugzilla.redhat.com/show_bug.cgi?id=1289274 + # tracker for chronyd/cephtest issue: + # http://tracker.ceph.com/issues/14244 + known_denials = [ + 'comm="dmidecode"', + 'chronyd.service', + 'name="cephtest"', + 'scontext=system_u:system_r:nrpe_t:s0', + 'scontext=system_u:system_r:pcp_pmlogger_t', + 'scontext=system_u:system_r:pcp_pmcd_t:s0', + 'comm="rhsmd"', + 'scontext=system_u:system_r:syslogd_t:s0', + 'tcontext=system_u:system_r:nrpe_t:s0', + 'comm="updatedb"', + 'comm="smartd"', + 'comm="rhsmcertd-worke"', + 'comm="setroubleshootd"', + 'comm="rpm"', + 'tcontext=system_u:object_r:container_runtime_exec_t:s0', + 'comm="ksmtuned"', + 'comm="sssd"', + 'comm="sss_cache"', + 'context=system_u:system_r:NetworkManager_dispatcher_t:s0', + ] + se_allowlist = self.config.get('allowlist', []) + if se_allowlist: + known_denials.extend(se_allowlist) + ignore_known_denials = '\'\(' + str.join('\|', known_denials) + '\)\'' + for remote in self.cluster.remotes.keys(): + proc = remote.run( + args=['sudo', 'grep', '-a', 'avc: .*denied', + '/var/log/audit/audit.log', run.Raw('|'), 'grep', '-av', + run.Raw(ignore_known_denials)], + stdout=StringIO(), + check_status=False, + ) + output = proc.stdout.getvalue() + if output: + denials = output.strip().split('\n') + log.debug("%s has %s denials", remote.name, len(denials)) 
+ else: + denials = [] + all_denials[remote.name] = denials + return all_denials + + def teardown(self): + self.restore_modes() + self.archive_log() + self.get_new_denials() + + def restore_modes(self): + """ + If necessary, restore previous SELinux modes + """ + # If there's nothing to do, skip this + if not set(self.old_modes.values()).difference(set([self.mode])): + return + log.info("Restoring old SELinux modes") + for remote in self.cluster.remotes.keys(): + mode = self.old_modes[remote.name] + if mode == "Disabled" or mode == "disabled": + continue + if mode != self.mode: + remote.run( + args=['sudo', '/usr/sbin/setenforce', mode], + ) + + def archive_log(self): + if not hasattr(self.ctx, 'archive') or not self.ctx.archive: + return + archive_dir = get_archive_dir(self.ctx) + audit_archive = os.path.join(archive_dir, 'audit') + mkdir_cmd = "mkdir {audit_archive}" + cp_cmd = "sudo cp /var/log/audit/audit.log {audit_archive}" + chown_cmd = "sudo chown $USER {audit_archive}/audit.log" + gzip_cmd = "gzip {audit_archive}/audit.log" + full_cmd = " && ".join((mkdir_cmd, cp_cmd, chown_cmd, gzip_cmd)) + self.cluster.run( + args=full_cmd.format(audit_archive=audit_archive) + ) + + def get_new_denials(self): + """ + Determine if there are any new denials in the audit log + """ + all_denials = self.get_denials() + new_denials = dict() + for remote in self.cluster.remotes.keys(): + old_host_denials = self.old_denials[remote.name] + all_host_denials = all_denials[remote.name] + new_host_denials = set(all_host_denials).difference( + set(old_host_denials) + ) + new_denials[remote.name] = list(new_host_denials) + + for remote in self.cluster.remotes.keys(): + if len(new_denials[remote.name]): + raise SELinuxError(node=remote, + denials=new_denials[remote.name]) + +task = SELinux diff --git a/teuthology/task/sequential.py b/teuthology/task/sequential.py new file mode 100644 index 0000000000..2414336fe2 --- /dev/null +++ b/teuthology/task/sequential.py @@ -0,0 +1,58 @@ +""" +Task sequencer +""" +import sys +import logging + +from teuthology import run_tasks + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Sequentialize a group of tasks into one executable block + + example:: + + - sequential: + - tasktest: + - tasktest: + + You can also reference the job from elsewhere:: + + foo: + tasktest: + tasks: + - sequential: + - tasktest: + - foo + - tasktest: + + That is, if the entry is not a dict, we will look it up in the top-level + config. + + Sequential tasks and Parallel tasks can be nested. + + :param ctx: Context + :param config: Configuration + """ + stack = [] + try: + for entry in config: + if not isinstance(entry, dict): + entry = ctx.config.get(entry, {}) + ((taskname, confg),) = entry.items() + log.info('In sequential, running task %s...' % taskname) + mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg) + if hasattr(mgr, '__enter__'): + mgr.__enter__() + stack.append(mgr) + finally: + try: + exc_info = sys.exc_info() + while stack: + mgr = stack.pop() + mgr.__exit__(*exc_info) + finally: + del exc_info diff --git a/teuthology/task/sleep.py b/teuthology/task/sleep.py new file mode 100644 index 0000000000..bd6d445446 --- /dev/null +++ b/teuthology/task/sleep.py @@ -0,0 +1,32 @@ +""" +Sleep task +""" +import logging +import time + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Sleep for some number of seconds. 
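+    If no 'duration' is given, the task sleeps for 5 seconds.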
+ + Example:: + + + tasks: + - install: + - ceph: + - sleep: + duration: 10 + - interactive: + + :param ctx: Context + :param config: Configuration + """ + if not config: + config = {} + assert isinstance(config, dict) + duration = int(config.get('duration', 5)) + log.info('Sleeping for {} seconds'.format(duration)) + time.sleep(duration) diff --git a/teuthology/task/ssh_keys.py b/teuthology/task/ssh_keys.py new file mode 100644 index 0000000000..f7e0dba32c --- /dev/null +++ b/teuthology/task/ssh_keys.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +""" +Ssh-key key handlers and associated routines +""" +import contextlib +import logging +import paramiko +import re +from datetime import datetime + +from io import StringIO +from teuthology import contextutil +import teuthology.misc as misc +from teuthology.orchestra import run + +log = logging.getLogger(__name__) +ssh_keys_user = 'ssh-keys-user' + + +def timestamp(format_='%Y-%m-%d_%H:%M:%S:%f'): + """ + Return a UTC timestamp suitable for use in filenames + """ + return datetime.utcnow().strftime(format_) + + +def backup_file(remote, path, sudo=False): + """ + Creates a backup of a file on the remote, simply by copying it and adding a + timestamp to the name. + """ + backup_path = "{path}_{timestamp}".format( + path=path, timestamp=timestamp() + ) + args = [ + 'cp', '-v', '-a', path, backup_path, + ] + if sudo: + args.insert(0, 'sudo') + remote.run(args=args) + return backup_path + + +def generate_keys(): + """ + Generatees a public and private key + """ + key = paramiko.RSAKey.generate(2048) + privateString = StringIO() + key.write_private_key(privateString) + return key.get_base64(), privateString.getvalue() + +def particular_ssh_key_test(line_to_test, ssh_key): + """ + Check the validity of the ssh_key + """ + match = re.match('[\w-]+ {key} \S+@\S+'.format(key=re.escape(ssh_key)), line_to_test) + + if match: + return False + else: + return True + +def ssh_keys_user_line_test(line_to_test, username ): + """ + Check the validity of the username + """ + match = re.match('[\w-]+ \S+ {username}@\S+'.format(username=username), line_to_test) + + if match: + return False + else: + return True + +def cleanup_added_key(ctx, key_backup_files, path): + """ + Delete the keys and removes ~/.ssh/authorized_keys entries we added + """ + log.info('cleaning up keys added for testing') + + for remote in ctx.cluster.remotes: + username, hostname = str(remote).split('@') + if "" == username or "" == hostname: + continue + else: + log.info(' cleaning up keys for user {user} on {host}'.format(host=hostname, user=username)) + misc.delete_file(remote, '/home/{user}/.ssh/id_rsa'.format(user=username)) + misc.delete_file(remote, '/home/{user}/.ssh/id_rsa.pub'.format(user=username)) + misc.move_file(remote, key_backup_files[remote], path) + +@contextlib.contextmanager +def tweak_ssh_config(ctx, config): + """ + Turn off StrictHostKeyChecking + """ + run.wait( + ctx.cluster.run( + args=[ + 'echo', + 'StrictHostKeyChecking no\n', + run.Raw('>'), + run.Raw('/home/ubuntu/.ssh/config'), + run.Raw('&&'), + 'echo', + 'UserKnownHostsFile ', + run.Raw('/dev/null'), + run.Raw('>>'), + run.Raw('/home/ubuntu/.ssh/config'), + run.Raw('&&'), + run.Raw('chmod 600 /home/ubuntu/.ssh/config'), + ], + wait=False, + ) + ) + + try: + yield + + finally: + run.wait( + ctx.cluster.run( + args=['rm',run.Raw('/home/ubuntu/.ssh/config')], + wait=False + ), + ) + +@contextlib.contextmanager +def push_keys_to_host(ctx, config, public_key, private_key): + """ + Push keys to all hosts + """ + 
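+    # Summary of the block below: log the generated public key, build one
+    # authorized_keys payload with an ssh-rsa entry per remote (recorded
+    # under the shared 'ssh-keys-user' name), then for every host install
+    # the common key pair as ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub, back up
+    # the existing authorized_keys file, and append the payload to it.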
log.info('generated public key {pub_key}'.format(pub_key=public_key)) + + # add an entry for all hosts in ctx to auth_keys_data + auth_keys_data = '' + + for inner_host in ctx.cluster.remotes.keys(): + inner_username, inner_hostname = str(inner_host).split('@') + # create a 'user@hostname' string using our fake hostname + fake_hostname = '{user}@{host}'.format(user=ssh_keys_user, host=str(inner_hostname)) + auth_keys_data += '\nssh-rsa {pub_key} {user_host}\n'.format(pub_key=public_key, user_host=fake_hostname) + + key_backup_files = dict() + # for each host in ctx, add keys for all other hosts + for remote in ctx.cluster.remotes: + username, hostname = str(remote).split('@') + if "" == username or "" == hostname: + continue + else: + log.info('pushing keys to {host} for {user}'.format(host=hostname, user=username)) + + # adding a private key + priv_key_file = '/home/{user}/.ssh/id_rsa'.format(user=username) + priv_key_data = '{priv_key}'.format(priv_key=private_key) + misc.delete_file(remote, priv_key_file, force=True) + # Hadoop requires that .ssh/id_rsa have permissions of '500' + remote.write_file(priv_key_file, priv_key_data, mode='0500') + + # then a private key + pub_key_file = '/home/{user}/.ssh/id_rsa.pub'.format(user=username) + pub_key_data = 'ssh-rsa {pub_key} {user_host}'.format(pub_key=public_key, user_host=str(remote)) + misc.delete_file(remote, pub_key_file, force=True) + remote.write_file(pub_key_file, pub_key_data) + + # add appropriate entries to the authorized_keys file for this host + auth_keys_file = '/home/{user}/.ssh/authorized_keys'.format( + user=username) + key_backup_files[remote] = backup_file(remote, auth_keys_file) + misc.append_lines_to_file(remote, auth_keys_file, auth_keys_data) + + try: + yield + + finally: + # cleanup the keys + log.info("Cleaning up SSH keys") + cleanup_added_key(ctx, key_backup_files, auth_keys_file) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Creates a set of RSA keys, distributes the same key pair + to all hosts listed in ctx.cluster, and adds all hosts + to all others authorized_keys list. + + During cleanup it will delete .ssh/id_rsa, .ssh/id_rsa.pub + and remove the entries in .ssh/authorized_keys while leaving + pre-existing entries in place. + """ + + if config is None: + config = {} + assert isinstance(config, dict), \ + "task hadoop only supports a dictionary for configuration" + + # this does not need to do cleanup and does not depend on + # ctx, so I'm keeping it outside of the nested calls + public_key_string, private_key_string = generate_keys() + + with contextutil.nested( + lambda: tweak_ssh_config(ctx, config), + lambda: push_keys_to_host(ctx, config, public_key_string, private_key_string), + #lambda: tweak_ssh_config(ctx, config), + ): + yield + diff --git a/teuthology/task/tasktest.py b/teuthology/task/tasktest.py new file mode 100644 index 0000000000..40926c569c --- /dev/null +++ b/teuthology/task/tasktest.py @@ -0,0 +1,50 @@ +""" +Parallel and sequential task tester. Not used by any ceph tests, but used to +unit test the parallel and sequential tasks +""" +import logging +import contextlib +import time + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Task that just displays information when it is create and when it is + destroyed/cleaned up. This task was used to test parallel and + sequential task options. 
+ + example:: + + tasks: + - sequential: + - tasktest: + - id: 'foo' + - tasktest: + - id: 'bar' + - delay:5 + - tasktest: + + The above yaml will sequentially start a test task named foo and a test + task named bar. Bar will take 5 seconds to complete. After foo and bar + have finished, an unidentified tasktest task will run. + """ + try: + delay = config.get('delay', 0) + id = config.get('id', 'UNKNOWN') + except AttributeError: + delay = 0 + id = 'UNKNOWN' + try: + log.info('**************************************************') + log.info('Started task test -- %s' % id) + log.info('**************************************************') + time.sleep(delay) + yield + + finally: + log.info('**************************************************') + log.info('Task test is being cleaned up -- %s' % id) + log.info('**************************************************') + diff --git a/teuthology/task/tests/__init__.py b/teuthology/task/tests/__init__.py new file mode 100644 index 0000000000..43c6c11699 --- /dev/null +++ b/teuthology/task/tests/__init__.py @@ -0,0 +1,107 @@ +""" +This task is used to integration test teuthology. Including this +task in your yaml config will execute pytest which finds any tests in +the current directory. Each test that is discovered will be passed the +teuthology ctx and config args that each teuthology task usually gets. +This allows the tests to operate against the cluster. + +An example:: + + tasks + - tests: + +""" +import logging +import pytest + +from teuthology.job_status import set_status + + +log = logging.getLogger(__name__) + + +@pytest.fixture +def ctx(): + return {} + + +@pytest.fixture +def config(): + return [] + + +class TeuthologyContextPlugin(object): + def __init__(self, ctx, config): + self.ctx = ctx + self.config = config + self.failures = list() + + # this is pytest hook for generating tests with custom parameters + def pytest_generate_tests(self, metafunc): + # pass the teuthology ctx and config to each test method + if "ctx" in metafunc.fixturenames and \ + "config" in metafunc.fixturenames: + metafunc.parametrize(["ctx", "config"], [(self.ctx, self.config),]) + + # log the outcome of each test + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_makereport(self, item, call): + outcome = yield + report = outcome.get_result() + + # after the test has been called, get its report and log it + if call.when == 'call': + # item.location[0] is a slash delimeted path to the test file + # being ran. We only want the portion after teuthology.task.tests + test_path = item.location[0].replace("/", ".").split(".") + test_path = ".".join(test_path[4:-1]) + # removes the string '[ctx0, config0]' after the test name + test_name = item.location[2].split("[")[0] + name = "{path}:{name}".format(path=test_path, name=test_name) + if report.passed: + log.info("{name} Passed".format(name=name)) + elif report.skipped: + log.info("{name} {info}".format( + name=name, + info=call.excinfo.exconly() + )) + else: + # TODO: figure out a way to log the traceback + log.error("{name} Failed:\n {info}".format( + name=name, + info=call.excinfo.exconly() + )) + failure = "{name}: {err}".format( + name=name, + err=call.excinfo.exconly().replace("\n", "") + ) + self.failures.append(failure) + self.ctx.summary['failure_reason'] = self.failures + + return report + + +def task(ctx, config): + """ + Use pytest to recurse through this directory, finding any tests + and then executing them with the teuthology ctx and config args. 
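+    A discoverable test in this directory might look like this (illustrative
+    sketch; the ctx and config arguments are injected by
+    TeuthologyContextPlugin above):
+
+        class TestExample(object):
+            def test_remotes_present(self, ctx, config):
+                assert ctx.cluster.remotes
+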
+ Your tests must follow standard pytest conventions to be discovered. + """ + try: + status = pytest.main( + args=[ + '-q', + '--pyargs', __name__, 'teuthology.test' + ], + plugins=[TeuthologyContextPlugin(ctx, config)] + ) + except Exception: + log.exception("Saw non-test failure!") + set_status(ctx.summary, "dead") + else: + if status == 0: + log.info("OK. All tests passed!") + set_status(ctx.summary, "pass") + else: + log.error("FAIL. Saw test failures...") + set_status(ctx.summary, "fail") diff --git a/teuthology/task/tests/test_locking.py b/teuthology/task/tests/test_locking.py new file mode 100644 index 0000000000..05b0f45ad3 --- /dev/null +++ b/teuthology/task/tests/test_locking.py @@ -0,0 +1,25 @@ +import pytest + + +class TestLocking(object): + + def test_correct_os_type(self, ctx, config): + os_type = ctx.config.get("os_type") + if os_type is None: + pytest.skip('os_type was not defined') + for remote in ctx.cluster.remotes.keys(): + assert remote.os.name == os_type + + def test_correct_os_version(self, ctx, config): + os_version = ctx.config.get("os_version") + if os_version is None: + pytest.skip('os_version was not defined') + if ctx.config.get("os_type") == "debian": + pytest.skip('known issue with debian versions; see: issue #10878') + for remote in ctx.cluster.remotes.keys(): + assert remote.inventory_info['os_version'] == os_version + + def test_correct_machine_type(self, ctx, config): + machine_type = ctx.machine_type + for remote in ctx.cluster.remotes.keys(): + assert remote.machine_type in machine_type diff --git a/teuthology/task/tests/test_run.py b/teuthology/task/tests/test_run.py new file mode 100644 index 0000000000..f86b0b4f13 --- /dev/null +++ b/teuthology/task/tests/test_run.py @@ -0,0 +1,40 @@ +import logging +import pytest + +from io import StringIO + +from teuthology.exceptions import CommandFailedError + +log = logging.getLogger(__name__) + + +class TestRun(object): + """ + Tests to see if we can make remote procedure calls to the current cluster + """ + + def test_command_failed_label(self, ctx, config): + result = "" + try: + ctx.cluster.run( + args=["python3", "-c", "assert False"], + label="working as expected, nothing to see here" + ) + except CommandFailedError as e: + result = str(e) + + assert "working as expected" in result + + def test_command_failed_no_label(self, ctx, config): + with pytest.raises(CommandFailedError): + ctx.cluster.run( + args=["python3", "-c", "assert False"], + ) + + def test_command_success(self, ctx, config): + result = StringIO() + ctx.cluster.run( + args=["python3", "-c", "print('hi')"], + stdout=result + ) + assert result.getvalue().strip() == "hi" diff --git a/teuthology/task/timer.py b/teuthology/task/timer.py new file mode 100644 index 0000000000..2abf188275 --- /dev/null +++ b/teuthology/task/timer.py @@ -0,0 +1,46 @@ +""" +Timer task +""" +import logging +import contextlib +import datetime + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Timer + + Measure the time that this set of tasks takes and save that value in the summary file. + Config is a description of what we are timing. 
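+    The result is saved under summary['timer'][&lt;description&gt;] as a list of
+    (date, elapsed) tuples, so repeated timings of the same description
+    accumulate.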
+ + example:: + + tasks: + - ceph: + - foo: + - timer: "fsx run" + - fsx: + + """ + start = datetime.datetime.now() + log.debug("got here in timer") + try: + yield + finally: + nowinfo = datetime.datetime.now() + elapsed = nowinfo - start + datesaved = nowinfo.isoformat(' ') + hourz, remainder = divmod(elapsed.seconds, 3600) + minutez, secondz = divmod(remainder, 60) + elapsedtime = "%02d:%02d:%02d.%06d" % (hourz,minutez,secondz, elapsed.microseconds) + dateinfo = (datesaved, elapsedtime) + if not 'timer' in ctx.summary: + ctx.summary['timer'] = {config : [dateinfo]} + else: + if config in ctx.summary['timer']: + ctx.summary['timer'][config].append(dateinfo) + else: + ctx.summary['timer'][config] = [dateinfo] + log.info('Elapsed time for %s -- %s' % (config,elapsedtime)) diff --git a/teuthology/templates/email-sleep-before-teardown.jinja2 b/teuthology/templates/email-sleep-before-teardown.jinja2 new file mode 100644 index 0000000000..9cc054f408 --- /dev/null +++ b/teuthology/templates/email-sleep-before-teardown.jinja2 @@ -0,0 +1,10 @@ +Teuthology job {{ run_name }}/{{ job_id }} has fallen asleep at {{ sleep_date }} for {{ sleep_time }} + +Owner: {{ owner }} +Suite Name: {{ suite_name }} +Sleep Date: {{ sleep_date }} +Sleep Time: {{ sleep_time_sec }} seconds ({{ sleep_time }}) +Job Info: {{ job_info }} +Job Logs: {{ job_logs }} +Task Stack: {{ task_stack }} +Current Status: {{ status }} diff --git a/teuthology/templates/rocketchat-sleep-before-teardown.jinja2 b/teuthology/templates/rocketchat-sleep-before-teardown.jinja2 new file mode 100644 index 0000000000..4109ec5a03 --- /dev/null +++ b/teuthology/templates/rocketchat-sleep-before-teardown.jinja2 @@ -0,0 +1,6 @@ +The teuthology job [{{ job_id }}]({{ job_info }}) for suite *{{ suite_name }}* owned by '{{ owner }}' has fallen asleep with status '{{ status }}' at {{ sleep_date }} for __{{ sleep_time }}__ ({{ sleep_time_sec }} seconds). +Open [teuthology.log]({{ job_logs }}teuthology.log) for details, or go to [all logs]({{ job_logs}}). + +Job Description: {{ job_desc }} +Run Name: {{ run_name }} +Task Stack: {{ task_stack }} diff --git a/teuthology/test/__init__.py b/teuthology/test/__init__.py new file mode 100644 index 0000000000..1eb9e81084 --- /dev/null +++ b/teuthology/test/__init__.py @@ -0,0 +1,9 @@ +import os +import pytest +import sys + +skipif_teuthology_process = pytest.mark.skipif( + os.path.basename(sys.argv[0]) == "teuthology", + reason="Skipped because this test cannot pass when run in a teuthology " \ + "process (as opposed to py.test)" +) \ No newline at end of file diff --git a/teuthology/test/fake_archive.py b/teuthology/test/fake_archive.py new file mode 100644 index 0000000000..76a944f46c --- /dev/null +++ b/teuthology/test/fake_archive.py @@ -0,0 +1,107 @@ +import os +import shutil +import yaml +import random + + +class FakeArchive(object): + def __init__(self, archive_base="./test_archive"): + self.archive_base = archive_base + + def get_random_metadata(self, run_name, job_id=None, hung=False): + """ + Generate a random info dict for a fake job. If 'hung' is not True, also + generate a summary dict. + + :param run_name: Run name e.g. 'test_foo' + :param job_id: Job ID e.g. '12345' + :param hung: Simulate a hung job e.g. 
don't return a summary.yaml + :return: A dict with keys 'job_id', 'info' and possibly + 'summary', with corresponding values + """ + rand = random.Random() + + description = 'description for job with id %s' % job_id + owner = 'job@owner' + duration = rand.randint(1, 36000) + pid = rand.randint(1000, 99999) + job_id = rand.randint(1, 99999) + + info = { + 'description': description, + 'job_id': job_id, + 'run_name': run_name, + 'owner': owner, + 'pid': pid, + } + + metadata = { + 'info': info, + 'job_id': job_id, + } + + if not hung: + success = True if rand.randint(0, 1) != 1 else False + + summary = { + 'description': description, + 'duration': duration, + 'owner': owner, + 'success': success, + } + + if not success: + summary['failure_reason'] = 'Failure reason!' + metadata['summary'] = summary + + return metadata + + def setup(self): + if os.path.exists(self.archive_base): + shutil.rmtree(self.archive_base) + os.mkdir(self.archive_base) + + def teardown(self): + shutil.rmtree(self.archive_base) + + def populate_archive(self, run_name, jobs): + run_archive_dir = os.path.join(self.archive_base, run_name) + os.mkdir(run_archive_dir) + for job in jobs: + archive_dir = os.path.join(run_archive_dir, str(job['job_id'])) + os.mkdir(archive_dir) + + with open(os.path.join(archive_dir, 'info.yaml'), 'w') as yfile: + yaml.safe_dump(job['info'], yfile) + + if 'summary' in job: + summary_path = os.path.join(archive_dir, 'summary.yaml') + with open(summary_path, 'w') as yfile: + yaml.safe_dump(job['summary'], yfile) + + def create_fake_run(self, run_name, job_count, yaml_path, num_hung=0): + """ + Creates a fake run using run_name. Uses the YAML specified for each + job's config.yaml + + Returns a list of job_ids + """ + assert os.path.exists(yaml_path) + assert job_count > 0 + jobs = [] + made_hung = 0 + for i in range(job_count): + if made_hung < num_hung: + jobs.append(self.get_random_metadata(run_name, hung=True)) + made_hung += 1 + else: + jobs.append(self.get_random_metadata(run_name, hung=False)) + #job_config = yaml.safe_load(yaml_path) + self.populate_archive(run_name, jobs) + for job in jobs: + job_id = job['job_id'] + job_yaml_path = os.path.join(self.archive_base, run_name, + str(job_id), 'config.yaml') + shutil.copyfile(yaml_path, job_yaml_path) + return jobs + diff --git a/teuthology/test/fake_fs.py b/teuthology/test/fake_fs.py new file mode 100644 index 0000000000..c5cb6e4f05 --- /dev/null +++ b/teuthology/test/fake_fs.py @@ -0,0 +1,90 @@ +from io import BytesIO +from contextlib import closing + + +try: + FileNotFoundError, NotADirectoryError +except NameError: + FileNotFoundError = NotADirectoryError = OSError + + +def make_fake_fstools(fake_filesystem): + """ + Build fake versions of os.listdir(), os.isfile(), etc. 
for use in + unit tests + + An example fake_filesystem value: + >>> fake_fs = {\ + 'a_directory': {\ + 'another_directory': {\ + 'empty_file': None,\ + 'another_empty_file': None,\ + },\ + 'random_file': None,\ + 'yet_another_directory': {\ + 'empty_directory': {},\ + },\ + 'file_with_contents': 'data',\ + },\ + } + >>> fake_listdir, fake_isfile, _, _ = \ + make_fake_fstools(fake_fs) + >>> fake_listdir('a_directory/yet_another_directory') + ['empty_directory'] + >>> fake_isfile('a_directory/yet_another_directory') + False + + :param fake_filesystem: A dict representing a filesystem + """ + assert isinstance(fake_filesystem, dict) + + def fake_listdir(path, fsdict=False): + if fsdict is False: + fsdict = fake_filesystem + + remainder = path.strip('/') + '/' + subdict = fsdict + while '/' in remainder: + next_dir, remainder = remainder.split('/', 1) + if next_dir not in subdict: + raise FileNotFoundError( + '[Errno 2] No such file or directory: %s' % next_dir) + subdict = subdict.get(next_dir) + if not isinstance(subdict, dict): + raise NotADirectoryError('[Errno 20] Not a directory: %s' % next_dir) + if subdict and not remainder: + return list(subdict) + return [] + + def fake_isfile(path, fsdict=False): + if fsdict is False: + fsdict = fake_filesystem + + components = path.strip('/').split('/') + subdict = fsdict + for component in components: + if component not in subdict: + raise FileNotFoundError('[Errno 2] No such file or directory: %s' % component) + subdict = subdict.get(component) + return subdict is None or isinstance(subdict, str) + + def fake_isdir(path, fsdict=False): + return not fake_isfile(path) + + def fake_exists(path, fsdict=False): + return fake_isfile(path, fsdict) or fake_isdir(path, fsdict) + + def fake_open(path, mode=None, buffering=None): + components = path.strip('/').split('/') + subdict = fake_filesystem + for component in components: + if component not in subdict: + raise FileNotFoundError('[Errno 2] No such file or directory: %s' % component) + subdict = subdict.get(component) + if isinstance(subdict, dict): + raise IOError('[Errno 21] Is a directory: %s' % path) + elif subdict is None: + return closing(BytesIO(b'')) + return closing(BytesIO(subdict.encode())) + + return fake_exists, fake_listdir, fake_isfile, fake_isdir, fake_open diff --git a/teuthology/test/integration/__init__.py b/teuthology/test/integration/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/test/integration/test_suite.py b/teuthology/test/integration/test_suite.py new file mode 100644 index 0000000000..04a4c122b6 --- /dev/null +++ b/teuthology/test/integration/test_suite.py @@ -0,0 +1,86 @@ +import os +import requests +from pytest import raises, skip + +from teuthology.config import config +from teuthology import suite + + +class TestSuiteOnline(object): + def setup(self): + if 'TEST_ONLINE' not in os.environ: + skip("To run these sets, set the environment variable TEST_ONLINE") + + def test_ceph_hash_simple(self): + resp = requests.get( + 'https://api.github.com/repos/ceph/ceph/git/refs/heads/main') + ref_hash = resp.json()['object']['sha'] + assert suite.get_hash('ceph') == ref_hash + + def test_kernel_hash_saya(self): + # We don't currently have these packages. 
+ assert suite.get_hash('kernel', 'main', 'default', 'saya') is None + + def test_all_main_branches(self): + # Don't attempt to send email + config.results_email = None + job_config = suite.create_initial_config('suite', 'main', + 'main', 'main', 'testing', + 'default', 'centos', 'plana') + assert ((job_config.branch, job_config.teuthology_branch, + job_config.suite_branch) == ('main', 'main', 'main')) + + def test_config_bogus_kernel_branch(self): + # Don't attempt to send email + config.results_email = None + with raises(suite.ScheduleFailError): + suite.create_initial_config('s', None, 'main', 't', + 'bogus_kernel_branch', 'f', 'd', 'm') + + def test_config_bogus_flavor(self): + # Don't attempt to send email + config.results_email = None + with raises(suite.ScheduleFailError): + suite.create_initial_config('s', None, 'main', 't', 'k', + 'bogus_flavor', 'd', 'm') + + def test_config_bogus_ceph_branch(self): + # Don't attempt to send email + config.results_email = None + with raises(suite.ScheduleFailError): + suite.create_initial_config('s', None, 'bogus_ceph_branch', 't', + 'k', 'f', 'd', 'm') + + def test_config_bogus_suite_branch(self): + # Don't attempt to send email + config.results_email = None + with raises(suite.ScheduleFailError): + suite.create_initial_config('s', 'bogus_suite_branch', 'main', + 't', 'k', 'f', 'd', 'm') + + def test_config_bogus_teuthology_branch(self): + # Don't attempt to send email + config.results_email = None + with raises(suite.ScheduleFailError): + suite.create_initial_config('s', None, 'main', + 'bogus_teuth_branch', 'k', 'f', 'd', + 'm') + + def test_config_substitution(self): + # Don't attempt to send email + config.results_email = None + job_config = suite.create_initial_config('MY_SUITE', 'main', + 'main', 'main', 'testing', + 'default', 'centos', 'plana') + assert job_config['suite'] == 'MY_SUITE' + + def test_config_kernel_section(self): + # Don't attempt to send email + config.results_email = None + job_config = suite.create_initial_config('MY_SUITE', 'main', + 'main', 'main', 'testing', + 'default', 'centos', 'plana') + assert job_config['kernel']['kdb'] is True + + +# maybe use notario for the above? 
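
The five bogus-argument tests above all repeat the same pattern: disable results email, call suite.create_initial_config() with one invalid value, and expect suite.ScheduleFailError. A minimal sketch of how they could be collapsed with pytest.mark.parametrize, assuming the same positional argument order used in the calls above (the test name here is illustrative, not part of the patch):

    import pytest

    from teuthology.config import config
    from teuthology import suite

    # One tuple per bogus input, mirroring the positional arguments passed
    # to suite.create_initial_config() in the tests above.
    BOGUS_ARGS = [
        ('s', None, 'main', 't', 'bogus_kernel_branch', 'f', 'd', 'm'),
        ('s', None, 'main', 't', 'k', 'bogus_flavor', 'd', 'm'),
        ('s', None, 'bogus_ceph_branch', 't', 'k', 'f', 'd', 'm'),
        ('s', 'bogus_suite_branch', 'main', 't', 'k', 'f', 'd', 'm'),
        ('s', None, 'main', 'bogus_teuth_branch', 'k', 'f', 'd', 'm'),
    ]

    @pytest.mark.parametrize('args', BOGUS_ARGS)
    def test_create_initial_config_rejects_bogus_input(args):
        # Don't attempt to send email
        config.results_email = None
        with pytest.raises(suite.ScheduleFailError):
            suite.create_initial_config(*args)

Each tuple mirrors one of the existing tests, so a parametrized version would keep the same coverage while reporting each bogus input as a separate test case.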
diff --git a/teuthology/test/task/__init__.py b/teuthology/test/task/__init__.py new file mode 100644 index 0000000000..6fae57a810 --- /dev/null +++ b/teuthology/test/task/__init__.py @@ -0,0 +1,205 @@ +from mock import patch, DEFAULT +from pytest import raises + +from teuthology.config import FakeNamespace +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task import Task + + +class TestTask(object): + klass = Task + task_name = 'task' + + def setup(self): + self.ctx = FakeNamespace() + self.ctx.config = dict() + self.task_config = dict() + + def test_overrides(self): + self.ctx.config['overrides'] = dict() + self.ctx.config['overrides'][self.task_name] = dict( + key_1='overridden', + ) + self.task_config.update(dict( + key_1='default', + key_2='default', + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + assert task.config['key_1'] == 'overridden' + assert task.config['key_2'] == 'default' + + def test_hosts_no_filter(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task_hosts = list(task.cluster.remotes) + assert len(task_hosts) == 2 + assert sorted(host.shortname for host in task_hosts) == \ + ['remote1', 'remote2'] + + def test_hosts_no_results(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.task_config.update(dict( + hosts=['role2'], + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with raises(RuntimeError): + with self.klass(self.ctx, self.task_config): + pass + + def test_hosts_one_role(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + self.task_config.update(dict( + hosts=['role1'], + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task_hosts = list(task.cluster.remotes) + assert len(task_hosts) == 1 + assert task_hosts[0].shortname == 'remote1' + + def test_hosts_two_roles(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + self.ctx.cluster.add(Remote('user@remote3'), ['role3']) + self.task_config.update(dict( + hosts=['role1', 'role3'], + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task_hosts = list(task.cluster.remotes) + assert len(task_hosts) == 2 + hostnames = [host.shortname for host in task_hosts] + assert sorted(hostnames) == ['remote1', 'remote3'] + + def test_hosts_two_hostnames(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1.example.com'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2.example.com'), ['role2']) + self.ctx.cluster.add(Remote('user@remote3.example.com'), ['role3']) + self.task_config.update(dict( + hosts=['remote1', 'remote2.example.com'], + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task_hosts = list(task.cluster.remotes) + assert len(task_hosts) == 2 + hostnames = [host.hostname 
for host in task_hosts] + assert sorted(hostnames) == ['remote1.example.com', + 'remote2.example.com'] + + def test_hosts_one_role_one_hostname(self): + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1.example.com'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2.example.com'), ['role2']) + self.ctx.cluster.add(Remote('user@remote3.example.com'), ['role3']) + self.task_config.update(dict( + hosts=['role1', 'remote2.example.com'], + )) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task_hosts = list(task.cluster.remotes) + assert len(task_hosts) == 2 + hostnames = [host.hostname for host in task_hosts] + assert sorted(hostnames) == ['remote1.example.com', + 'remote2.example.com'] + + def test_setup_called(self): + with patch.multiple( + self.klass, + setup=DEFAULT, + begin=DEFAULT, + end=DEFAULT, + teardown=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task.setup.assert_called_once_with() + + def test_begin_called(self): + with patch.multiple( + self.klass, + setup=DEFAULT, + begin=DEFAULT, + end=DEFAULT, + teardown=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task.begin.assert_called_once_with() + + def test_end_called(self): + self.task_config.update(dict()) + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + pass + task.end.assert_called_once_with() + + def test_teardown_called(self): + self.task_config.update(dict()) + with patch.multiple( + self.klass, + setup=DEFAULT, + begin=DEFAULT, + end=DEFAULT, + teardown=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + pass + task.teardown.assert_called_once_with() + + def test_skip_teardown(self): + self.task_config.update(dict( + skip_teardown=True, + )) + + def fake_teardown(self): + assert False + + with patch.multiple( + self.klass, + setup=DEFAULT, + begin=DEFAULT, + end=DEFAULT, + teardown=fake_teardown, + ): + with self.klass(self.ctx, self.task_config): + pass diff --git a/teuthology/test/task/test_ansible.py b/teuthology/test/task/test_ansible.py new file mode 100644 index 0000000000..bfef00d174 --- /dev/null +++ b/teuthology/test/task/test_ansible.py @@ -0,0 +1,625 @@ +import json +import os +import yaml + +from mock import patch, DEFAULT, Mock +from pytest import raises, mark +from teuthology.util.compat import PY3 +if PY3: + from io import StringIO as StringIO +else: + from io import BytesIO as StringIO + +from teuthology.config import config, FakeNamespace +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task import ansible +from teuthology.task.ansible import Ansible, CephLab + +from teuthology.test.task import TestTask + +class TestAnsibleTask(TestTask): + klass = Ansible + task_name = 'ansible' + + def setup(self): + pass + + def setup_method(self, method): + self.ctx = FakeNamespace() + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + self.ctx.config = dict() + self.ctx.summary = dict() + self.ctx.archive = '../' + self.task_config = dict(playbook=[]) + self.start_patchers() + + def start_patchers(self): + self.patchers = dict() + self.mocks = dict() + self.patchers['mkdtemp'] = patch( + 'teuthology.task.ansible.mkdtemp', return_value='/tmp/' + ) + 
m_NTF = Mock() + m_file = Mock() + m_file.name = 'file_name' + m_NTF.return_value = m_file + self.patchers['NTF'] = patch( + 'teuthology.task.ansible.NamedTemporaryFile', + m_NTF, + ) + self.patchers['file'] = patch( + 'teuthology.task.ansible.open', create=True) + self.patchers['os_mkdir'] = patch( + 'teuthology.task.ansible.os.mkdir', + ) + self.patchers['os_remove'] = patch( + 'teuthology.task.ansible.os.remove', + ) + self.patchers['shutil_rmtree'] = patch( + 'teuthology.task.ansible.shutil.rmtree', + ) + for name in self.patchers.keys(): + self.start_patcher(name) + + def start_patcher(self, name): + if name not in self.mocks.keys(): + self.mocks[name] = self.patchers[name].start() + + def teardown_method(self, method): + self.stop_patchers() + + def stop_patchers(self): + for name in list(self.mocks): + self.stop_patcher(name) + + def stop_patcher(self, name): + self.patchers[name].stop() + del self.mocks[name] + + def test_setup(self): + self.task_config.update(dict( + playbook=[] + )) + + def fake_get_playbook(self): + self.playbook_file = 'fake' + + with patch.multiple( + self.klass, + find_repo=DEFAULT, + get_playbook=fake_get_playbook, + get_inventory=DEFAULT, + generate_inventory=DEFAULT, + generate_playbook=Mock(side_effect=Exception), + ): + task = self.klass(self.ctx, self.task_config) + task.setup() + + def test_setup_generate_playbook(self): + self.task_config.update(dict( + playbook=[] + )) + with patch.multiple( + self.klass, + find_repo=DEFAULT, + get_playbook=DEFAULT, + get_inventory=DEFAULT, + generate_inventory=DEFAULT, + generate_playbook=DEFAULT, + ): + task = self.klass(self.ctx, self.task_config) + task.setup() + task.generate_playbook.assert_called_once_with() + + def test_find_repo_path(self): + self.task_config.update(dict( + repo='~/my/repo', + )) + task = self.klass(self.ctx, self.task_config) + task.find_repo() + assert task.repo_path == os.path.expanduser(self.task_config['repo']) + + @patch('teuthology.task.ansible.fetch_repo') + def test_find_repo_path_remote(self, m_fetch_repo): + self.task_config.update(dict( + repo='git://fake_host/repo.git', + )) + m_fetch_repo.return_value = '/tmp/repo' + task = self.klass(self.ctx, self.task_config) + task.find_repo() + assert task.repo_path == os.path.expanduser('/tmp/repo') + + @patch('teuthology.task.ansible.fetch_repo') + def test_find_repo_http(self, m_fetch_repo): + self.task_config.update(dict( + repo='http://example.com/my/repo', + )) + task = self.klass(self.ctx, self.task_config) + task.find_repo() + m_fetch_repo.assert_called_once_with(self.task_config['repo'], + 'main') + + @patch('teuthology.task.ansible.fetch_repo') + def test_find_repo_git(self, m_fetch_repo): + self.task_config.update(dict( + repo='git@example.com/my/repo', + )) + task = self.klass(self.ctx, self.task_config) + task.find_repo() + m_fetch_repo.assert_called_once_with(self.task_config['repo'], + 'main') + + def test_playbook_none(self): + del self.task_config['playbook'] + task = self.klass(self.ctx, self.task_config) + with raises(KeyError): + task.get_playbook() + + def test_playbook_wrong_type(self): + self.task_config.update(dict( + playbook=dict(), + )) + task = self.klass(self.ctx, self.task_config) + with raises(TypeError): + task.get_playbook() + + def test_playbook_list(self): + playbook = [ + dict( + roles=['role1'], + ), + ] + self.task_config.update(dict( + playbook=playbook, + )) + task = self.klass(self.ctx, self.task_config) + task.get_playbook() + assert task.playbook == playbook + + @patch.object(ansible.requests, 
'get') + def test_playbook_http(self, m_get): + m_get.return_value = Mock() + m_get.return_value.text = 'fake playbook text' + playbook = "http://example.com/my_playbook.yml" + self.task_config.update(dict( + playbook=playbook, + )) + task = self.klass(self.ctx, self.task_config) + task.get_playbook() + m_get.assert_called_once_with(playbook) + + def test_playbook_file(self): + fake_playbook = [dict(fake_playbook=True)] + fake_playbook_obj = StringIO(yaml.safe_dump(fake_playbook)) + self.task_config.update(dict( + playbook='~/fake/playbook', + )) + task = self.klass(self.ctx, self.task_config) + self.mocks['file'].return_value = fake_playbook_obj + task.get_playbook() + assert task.playbook == fake_playbook + + def test_playbook_file_missing(self): + self.task_config.update(dict( + playbook='~/fake/playbook', + )) + task = self.klass(self.ctx, self.task_config) + self.mocks['file'].side_effect = IOError + with raises(IOError): + task.get_playbook() + + def test_inventory_none(self): + self.task_config.update(dict( + playbook=[] + )) + task = self.klass(self.ctx, self.task_config) + with patch.object(ansible.os.path, 'exists') as m_exists: + m_exists.return_value = False + task.get_inventory() + assert task.inventory is None + + def test_inventory_path(self): + inventory = '/my/inventory' + self.task_config.update(dict( + playbook=[], + inventory=inventory, + )) + task = self.klass(self.ctx, self.task_config) + task.get_inventory() + assert task.inventory == inventory + assert task.generated_inventory is False + + def test_inventory_etc(self): + self.task_config.update(dict( + playbook=[] + )) + task = self.klass(self.ctx, self.task_config) + with patch.object(ansible.os.path, 'exists') as m_exists: + m_exists.return_value = True + task.get_inventory() + assert task.inventory == '/etc/ansible/hosts' + assert task.generated_inventory is False + + @mark.parametrize( + 'group_vars', + [ + dict(), + dict(all=dict(var0=0, var1=1)), + dict(foo=dict(var0=0), bar=dict(var0=1)), + ] + ) + def test_generate_inventory(self, group_vars): + self.task_config.update(dict( + playbook=[] + )) + if group_vars: + self.task_config.update(dict(group_vars=group_vars)) + task = self.klass(self.ctx, self.task_config) + hosts_file_path = '/my/hosts/inventory' + hosts_file_obj = StringIO() + hosts_file_obj.name = hosts_file_path + inventory_dir = os.path.dirname(hosts_file_path) + gv_dir = os.path.join(inventory_dir, 'group_vars') + self.mocks['mkdtemp'].return_value = inventory_dir + m_file = self.mocks['file'] + fake_files = [hosts_file_obj] + # Create StringIO object for each group_vars file + if group_vars: + fake_files += [StringIO() for i in sorted(group_vars)] + m_file.side_effect = fake_files + task.generate_inventory() + file_calls = m_file.call_args_list + # Verify the inventory file was created + assert file_calls[0][0][0] == hosts_file_path + # Verify each group_vars file was created + for gv_name, call_obj in zip(sorted(group_vars), file_calls[1:]): + gv_path = call_obj[0][0] + assert gv_path == os.path.join(gv_dir, '%s.yml' % gv_name) + # Verify the group_vars dir was created + if group_vars: + mkdir_call = self.mocks['os_mkdir'].call_args_list + assert mkdir_call[0][0][0] == gv_dir + assert task.generated_inventory is True + assert task.inventory == inventory_dir + # Verify the content of the inventory *file* + hosts_file_obj.seek(0) + assert hosts_file_obj.readlines() == [ + 'remote1\n', + 'remote2\n', + ] + # Verify the contents of each group_vars file + gv_names = sorted(group_vars) + for i in 
range(len(gv_names)): + gv_name = gv_names[i] + in_val = group_vars[gv_name] + gv_stringio = fake_files[1 + i] + gv_stringio.seek(0) + out_val = yaml.safe_load(gv_stringio) + assert in_val == out_val + + def test_generate_playbook(self): + playbook = [ + dict( + roles=['role1', 'role2'], + ), + ] + self.task_config.update(dict( + playbook=playbook + )) + task = self.klass(self.ctx, self.task_config) + playbook_file_path = '/my/playbook/file' + playbook_file_obj = StringIO() + playbook_file_obj.name = playbook_file_path + with patch.object(ansible, 'NamedTemporaryFile') as m_NTF: + m_NTF.return_value = playbook_file_obj + task.find_repo() + task.get_playbook() + task.generate_playbook() + m_NTF.assert_called_once_with( + prefix="teuth_ansible_playbook_", + dir=task.repo_path, + delete=False, + ) + assert task.generated_playbook is True + assert task.playbook_file == playbook_file_obj + playbook_file_obj.seek(0) + playbook_result = yaml.safe_load(playbook_file_obj) + assert playbook_result == playbook + + def test_execute_playbook(self): + playbook = '/my/playbook' + self.task_config.update(dict( + playbook=playbook + )) + fake_playbook = [dict(fake_playbook=True)] + fake_playbook_obj = StringIO(yaml.safe_dump(fake_playbook)) + fake_playbook_obj.name = playbook + self.mocks['mkdtemp'].return_value = '/inventory/dir' + + task = self.klass(self.ctx, self.task_config) + self.mocks['file'].return_value = fake_playbook_obj + task.setup() + args = task._build_args() + logger = StringIO() + with patch.object(ansible.pexpect, 'run') as m_run: + m_run.return_value = ('', 0) + with patch.object(Remote, 'reconnect') as m_reconnect: + m_reconnect.return_value = True + task.execute_playbook(_logfile=logger) + m_run.assert_called_once_with( + ' '.join(args), + cwd=task.repo_path, + logfile=logger, + withexitstatus=True, + timeout=None, + ) + + def test_execute_playbook_fail(self): + self.task_config.update(dict( + playbook=[], + )) + self.mocks['mkdtemp'].return_value = '/inventory/dir' + task = self.klass(self.ctx, self.task_config) + task.setup() + with patch.object(ansible.pexpect, 'run') as m_run: + with patch('teuthology.task.ansible.open') as m_open: + fake_failure_log = Mock() + fake_failure_log.__enter__ = Mock() + fake_failure_log.__exit__ = Mock() + m_open.return_value = fake_failure_log + m_run.return_value = ('', 1) + with raises(CommandFailedError): + task.execute_playbook() + assert task.ctx.summary.get('status') is None + + def test_build_args_no_tags(self): + self.task_config.update(dict( + playbook=[], + )) + task = self.klass(self.ctx, self.task_config) + task.setup() + args = task._build_args() + assert '--tags' not in args + + def test_build_args_tags(self): + self.task_config.update(dict( + playbook=[], + tags="user,pubkeys" + )) + task = self.klass(self.ctx, self.task_config) + task.setup() + args = task._build_args() + assert args.count('--tags') == 1 + assert args[args.index('--tags') + 1] == 'user,pubkeys' + + def test_build_args_skip_tags(self): + self.task_config.update(dict( + playbook=[], + skip_tags="user,pubkeys" + )) + task = self.klass(self.ctx, self.task_config) + task.setup() + args = task._build_args() + assert args.count('--skip-tags') == 1 + assert args[args.index('--skip-tags') + 1] == 'user,pubkeys' + + def test_build_args_no_vars(self): + self.task_config.update(dict( + playbook=[], + )) + task = self.klass(self.ctx, self.task_config) + task.setup() + args = task._build_args() + assert args.count('--extra-vars') == 1 + vars_str = args[args.index('--extra-vars') + 
1].strip("'") + extra_vars = json.loads(vars_str) + assert list(extra_vars) == ['ansible_ssh_user'] + + def test_build_args_vars(self): + extra_vars = dict( + string1='value1', + list1=['item1'], + dict1=dict(key='value'), + ) + + self.task_config.update(dict( + playbook=[], + vars=extra_vars, + )) + task = self.klass(self.ctx, self.task_config) + task.setup() + args = task._build_args() + assert args.count('--extra-vars') == 1 + vars_str = args[args.index('--extra-vars') + 1].strip("'") + got_extra_vars = json.loads(vars_str) + assert 'ansible_ssh_user' in got_extra_vars + assert got_extra_vars['string1'] == extra_vars['string1'] + assert got_extra_vars['list1'] == extra_vars['list1'] + assert got_extra_vars['dict1'] == extra_vars['dict1'] + + def test_teardown_inventory(self): + self.task_config.update(dict( + playbook=[], + )) + task = self.klass(self.ctx, self.task_config) + task.generated_inventory = True + task.inventory = 'fake' + with patch.object(ansible.shutil, 'rmtree') as m_rmtree: + task.teardown() + assert m_rmtree.called_once_with('fake') + + def test_teardown_playbook(self): + self.task_config.update(dict( + playbook=[], + )) + task = self.klass(self.ctx, self.task_config) + task.generated_playbook = True + task.playbook_file = Mock() + task.playbook_file.name = 'fake' + with patch.object(ansible.os, 'remove') as m_remove: + task.teardown() + assert m_remove.called_once_with('fake') + + def test_teardown_cleanup_with_vars(self): + self.task_config.update(dict( + playbook=[], + cleanup=True, + vars=dict(yum_repos="testing"), + )) + task = self.klass(self.ctx, self.task_config) + task.inventory = "fake" + task.generated_playbook = True + task.playbook_file = Mock() + task.playbook_file.name = 'fake' + with patch.object(self.klass, 'execute_playbook') as m_execute: + with patch.object(ansible.os, 'remove'): + task.teardown() + task._build_args() + assert m_execute.called + assert 'cleanup' in task.config['vars'] + assert 'yum_repos' in task.config['vars'] + + def test_teardown_cleanup_with_no_vars(self): + self.task_config.update(dict( + playbook=[], + cleanup=True, + )) + task = self.klass(self.ctx, self.task_config) + task.inventory = "fake" + task.generated_playbook = True + task.playbook_file = Mock() + task.playbook_file.name = 'fake' + with patch.object(self.klass, 'execute_playbook') as m_execute: + with patch.object(ansible.os, 'remove'): + task.teardown() + task._build_args() + assert m_execute.called + assert 'cleanup' in task.config['vars'] + + +class TestCephLabTask(TestAnsibleTask): + klass = CephLab + task_name = 'ansible.cephlab' + + def setup(self): + super(TestCephLabTask, self).setup() + self.task_config = dict() + + def start_patchers(self): + super(TestCephLabTask, self).start_patchers() + self.patchers['fetch_repo'] = patch( + 'teuthology.task.ansible.fetch_repo', + ) + self.patchers['fetch_repo'].return_value = 'PATH' + + def fake_get_playbook(self): + self.playbook_file = Mock() + self.playbook_file.name = 'cephlab.yml' + + self.patchers['get_playbook'] = patch( + 'teuthology.task.ansible.CephLab.get_playbook', + new=fake_get_playbook, + ) + for name in self.patchers.keys(): + self.start_patcher(name) + + @patch('teuthology.task.ansible.fetch_repo') + def test_find_repo_http(self, m_fetch_repo): + repo = os.path.join(config.ceph_git_base_url, + 'ceph-cm-ansible.git') + task = self.klass(self.ctx, dict()) + task.find_repo() + m_fetch_repo.assert_called_once_with(repo, 'main') + + def test_playbook_file(self): + fake_playbook = [dict(fake_playbook=True)] + 
fake_playbook_obj = StringIO(yaml.safe_dump(fake_playbook)) + playbook = 'cephlab.yml' + fake_playbook_obj.name = playbook + task = self.klass(self.ctx, dict()) + task.repo_path = '/tmp/fake/repo' + self.mocks['file'].return_value = fake_playbook_obj + task.get_playbook() + assert task.playbook_file.name == playbook + + def test_generate_inventory(self): + self.task_config.update(dict( + playbook=[] + )) + task = self.klass(self.ctx, self.task_config) + hosts_file_path = '/my/hosts/file' + hosts_file_obj = StringIO() + hosts_file_obj.name = hosts_file_path + self.mocks['mkdtemp'].return_value = os.path.dirname(hosts_file_path) + self.mocks['file'].return_value = hosts_file_obj + task.generate_inventory() + assert task.generated_inventory is True + assert task.inventory == os.path.dirname(hosts_file_path) + hosts_file_obj.seek(0) + assert hosts_file_obj.readlines() == [ + '[testnodes]\n', + 'remote1\n', + 'remote2\n', + ] + + def test_fail_status_dead(self): + self.task_config.update(dict( + playbook=[], + )) + self.mocks['mkdtemp'].return_value = '/inventory/dir' + task = self.klass(self.ctx, self.task_config) + task.ctx.summary = dict() + task.setup() + with patch.object(ansible.pexpect, 'run') as m_run: + with patch('teuthology.task.ansible.open') as m_open: + fake_failure_log = Mock() + fake_failure_log.__enter__ = Mock() + fake_failure_log.__exit__ = Mock() + m_open.return_value = fake_failure_log + m_run.return_value = ('', 1) + with raises(CommandFailedError): + task.execute_playbook() + assert task.ctx.summary.get('status') == 'dead' + + def test_execute_playbook_fail(self): + self.mocks['mkdtemp'].return_value = '/inventory/dir' + task = self.klass(self.ctx, self.task_config) + task.setup() + with patch.object(ansible.pexpect, 'run') as m_run: + with patch('teuthology.task.ansible.open') as m_open: + fake_failure_log = Mock() + fake_failure_log.__enter__ = Mock() + fake_failure_log.__exit__ = Mock() + m_open.return_value = fake_failure_log + m_run.return_value = ('', 1) + with raises(CommandFailedError): + task.execute_playbook() + assert task.ctx.summary.get('status') == 'dead' + + @mark.skip("Unsupported") + def test_generate_playbook(self): + pass + + @mark.skip("Unsupported") + def test_playbook_http(self): + pass + + @mark.skip("Unsupported") + def test_playbook_none(self): + pass + + @mark.skip("Unsupported") + def test_playbook_wrong_type(self): + pass + + @mark.skip("Unsupported") + def test_playbook_list(self): + pass + + @mark.skip("Test needs to be reimplemented for this class") + def test_playbook_file_missing(self): + pass diff --git a/teuthology/test/task/test_ceph_ansible.py b/teuthology/test/task/test_ceph_ansible.py new file mode 100644 index 0000000000..ff3b2ff6c5 --- /dev/null +++ b/teuthology/test/task/test_ceph_ansible.py @@ -0,0 +1,177 @@ +from mock import patch, MagicMock +from pytest import skip +from teuthology.util.compat import PY3 +if PY3: + from io import StringIO as StringIO +else: + from io import BytesIO as StringIO + +from teuthology.config import FakeNamespace +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task import ceph_ansible +from teuthology.task.ceph_ansible import CephAnsible + +from teuthology.test.task import TestTask + +SKIP_IRRELEVANT = "Not relevant to this subclass" + + +class TestCephAnsibleTask(TestTask): + klass = CephAnsible + task_name = 'ceph_ansible' + + def setup(self): + self.ctx = FakeNamespace() + self.ctx.cluster = Cluster() + 
self.ctx.cluster.add(Remote('user@remote1'), ['mon.0']) + self.ctx.cluster.add(Remote('user@remote2'), ['mds.0']) + self.ctx.cluster.add(Remote('user@remote3'), ['osd.0']) + self.ctx.summary = dict() + self.ctx.config = dict() + self.ctx.archive = '../' + self.task_config = dict() + self.start_patchers() + + def start_patchers(self): + m_fetch_repo = MagicMock() + m_fetch_repo.return_value = 'PATH' + + def fake_get_scratch_devices(remote): + return ['/dev/%s' % remote.shortname] + + self.patcher_get_scratch_devices = patch( + 'teuthology.task.ceph_ansible.get_scratch_devices', + fake_get_scratch_devices, + ) + self.patcher_get_scratch_devices.start() + + self.patcher_teardown = patch( + 'teuthology.task.ceph_ansible.CephAnsible.teardown', + ) + self.patcher_teardown.start() + + def fake_set_iface_and_cidr(self): + self._interface = 'eth0' + self._cidr = '172.21.0.0/20' + + self.patcher_remote = patch.multiple( + Remote, + _set_iface_and_cidr=fake_set_iface_and_cidr, + ) + self.patcher_remote.start() + + def stop_patchers(self): + self.patcher_get_scratch_devices.stop() + self.patcher_remote.stop() + self.patcher_teardown.stop() + + def test_playbook_none(self): + skip(SKIP_IRRELEVANT) + + def test_inventory_none(self): + skip(SKIP_IRRELEVANT) + + def test_inventory_path(self): + skip(SKIP_IRRELEVANT) + + def test_inventory_etc(self): + skip(SKIP_IRRELEVANT) + + def test_generate_hosts_file(self): + self.task_config.update(dict( + playbook=[], + vars=dict( + osd_auto_discovery=True, + monitor_interface='eth0', + radosgw_interface='eth0', + public_network='172.21.0.0/20', + ), + )) + task = self.klass(self.ctx, self.task_config) + hosts_file_path = '/my/hosts/file' + hosts_file_obj = StringIO() + hosts_file_obj.name = hosts_file_path + with patch.object(ceph_ansible, 'NamedTemporaryFile') as m_NTF: + m_NTF.return_value = hosts_file_obj + task.generate_hosts_file() + m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_", + mode='w+', + delete=False) + assert task.generated_inventory is True + assert task.inventory == hosts_file_path + hosts_file_obj.seek(0) + assert hosts_file_obj.read() == '\n'.join([ + '[mdss]', + 'remote2', + '', + '[mons]', + 'remote1', + '', + '[osds]', + 'remote3', + ]) + + def test_generate_hosts_file_with_devices(self): + self.task_config.update(dict( + playbook=[], + vars=dict( + monitor_interface='eth0', + radosgw_interface='eth0', + public_network='172.21.0.0/20', + ), + )) + task = self.klass(self.ctx, self.task_config) + hosts_file_path = '/my/hosts/file' + hosts_file_obj = StringIO() + hosts_file_obj.name = hosts_file_path + with patch.object(ceph_ansible, 'NamedTemporaryFile') as m_NTF: + m_NTF.return_value = hosts_file_obj + task.generate_hosts_file() + m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_", + mode='w+', + delete=False) + assert task.generated_inventory is True + assert task.inventory == hosts_file_path + hosts_file_obj.seek(0) + assert hosts_file_obj.read() == '\n'.join([ + '[mdss]', + 'remote2 devices=\'[]\'', + '', + '[mons]', + 'remote1 devices=\'[]\'', + '', + '[osds]', + 'remote3 devices=\'["/dev/remote3"]\'', + ]) + + def test_generate_hosts_file_with_network(self): + self.task_config.update(dict( + playbook=[], + vars=dict( + osd_auto_discovery=True, + ), + )) + task = self.klass(self.ctx, self.task_config) + hosts_file_path = '/my/hosts/file' + hosts_file_obj = StringIO() + hosts_file_obj.name = hosts_file_path + with patch.object(ceph_ansible, 'NamedTemporaryFile') as m_NTF: + m_NTF.return_value = hosts_file_obj + 
task.generate_hosts_file() + m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_", + mode='w+', + delete=False) + assert task.generated_inventory is True + assert task.inventory == hosts_file_path + hosts_file_obj.seek(0) + assert hosts_file_obj.read() == '\n'.join([ + '[mdss]', + "remote2 monitor_interface='eth0' public_network='172.21.0.0/20' radosgw_interface='eth0'", + '', + '[mons]', + "remote1 monitor_interface='eth0' public_network='172.21.0.0/20' radosgw_interface='eth0'", + '', + '[osds]', + "remote3 monitor_interface='eth0' public_network='172.21.0.0/20' radosgw_interface='eth0'", + ]) diff --git a/teuthology/test/task/test_console_log.py b/teuthology/test/task/test_console_log.py new file mode 100644 index 0000000000..132d503264 --- /dev/null +++ b/teuthology/test/task/test_console_log.py @@ -0,0 +1,88 @@ +import os + +from mock import patch + +from teuthology.config import FakeNamespace +from teuthology.config import config as teuth_config +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task.console_log import ConsoleLog + +from teuthology.test.task import TestTask + + +class TestConsoleLog(TestTask): + klass = ConsoleLog + task_name = 'console_log' + + def setup(self): + teuth_config.ipmi_domain = 'ipmi.domain' + teuth_config.ipmi_user = 'ipmi_user' + teuth_config.ipmi_password = 'ipmi_pass' + self.ctx = FakeNamespace() + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + self.ctx.config = dict() + self.ctx.archive = '/fake/path' + self.task_config = dict() + self.start_patchers() + + def start_patchers(self): + self.patchers = dict() + self.patchers['makedirs'] = patch( + 'teuthology.task.console_log.os.makedirs', + ) + self.patchers['is_vm'] = patch( + 'teuthology.lock.query.is_vm', + ) + self.patchers['is_vm'].return_value = False + self.patchers['get_status'] = patch( + 'teuthology.lock.query.get_status', + ) + self.mocks = dict() + for name, patcher in self.patchers.items(): + self.mocks[name] = patcher.start() + self.mocks['is_vm'].return_value = False + + def teardown(self): + for patcher in self.patchers.values(): + patcher.stop() + + def test_enabled(self): + task = self.klass(self.ctx, self.task_config) + assert task.enabled is True + + def test_disabled_noarchive(self): + self.ctx.archive = None + task = self.klass(self.ctx, self.task_config) + assert task.enabled is False + + def test_has_ipmi_credentials(self): + for remote in self.ctx.cluster.remotes.keys(): + remote.console.has_ipmi_credentials = False + remote.console.has_conserver = False + task = self.klass(self.ctx, self.task_config) + assert len(task.cluster.remotes.keys()) == 0 + + def test_remotes(self): + with self.klass(self.ctx, self.task_config) as task: + assert len(task.cluster.remotes) == len(self.ctx.cluster.remotes) + + @patch('teuthology.orchestra.console.PhysicalConsole') + def test_begin(self, m_pconsole): + with self.klass(self.ctx, self.task_config) as task: + assert len(task.processes) == len(self.ctx.cluster.remotes) + for remote in task.cluster.remotes.keys(): + dest_path = os.path.join( + self.ctx.archive, '%s.log' % remote.shortname) + assert remote.console.spawn_sol_log.called_once_with( + dest_path=dest_path) + + @patch('teuthology.orchestra.console.PhysicalConsole') + def test_end(self, m_pconsole): + with self.klass(self.ctx, self.task_config) as task: + pass + for proc in task.processes.values(): + assert 
proc.terminate.called_once_with() + assert proc.kill.called_once_with() diff --git a/teuthology/test/task/test_install.py b/teuthology/test/task/test_install.py new file mode 100644 index 0000000000..3c5be90484 --- /dev/null +++ b/teuthology/test/task/test_install.py @@ -0,0 +1,337 @@ +import os +import pytest +import yaml + +from mock import patch, Mock + +from teuthology.task import install + + +class TestInstall(object): + + def _get_default_package_list(self, project='ceph', debug=False): + path = os.path.join( + os.path.dirname(__file__), + '..', '..', 'task', 'install', 'packages.yaml', + ) + pkgs = yaml.safe_load(open(path))[project] + if not debug: + pkgs['deb'] = [p for p in pkgs['deb'] + if not p.endswith('-dbg')] + pkgs['rpm'] = [p for p in pkgs['rpm'] + if not p.endswith('-debuginfo')] + return pkgs + + def test_get_package_list_debug(self): + default_pkgs = self._get_default_package_list(debug=True) + default_pkgs['rpm'].sort() + default_pkgs['deb'].sort() + config = dict(debuginfo=True) + result = install.get_package_list(ctx=None, config=config) + result['rpm'].sort() + result['deb'].sort() + assert result == default_pkgs + + def test_get_package_list_no_debug(self): + default_pkgs = self._get_default_package_list(debug=False) + default_pkgs['rpm'].sort() + default_pkgs['deb'].sort() + config = dict(debuginfo=False) + result = install.get_package_list(ctx=None, config=config) + result['rpm'].sort() + result['deb'].sort() + assert result == default_pkgs + + def test_get_package_list_custom_rpm(self): + default_pkgs = self._get_default_package_list(debug=False) + default_pkgs['rpm'].sort() + default_pkgs['deb'].sort() + rpms = ['rpm1', 'rpm2', 'rpm2-debuginfo'] + config = dict(packages=dict(rpm=rpms)) + result = install.get_package_list(ctx=None, config=config) + result['rpm'].sort() + result['deb'].sort() + assert result['rpm'] == ['rpm1', 'rpm2'] + assert result['deb'] == default_pkgs['deb'] + + @patch("teuthology.task.install._get_builder_project") + @patch("teuthology.task.install.packaging.get_package_version") + def test_get_upgrade_version(self, m_get_package_version, + m_gitbuilder_project): + gb = Mock() + gb.version = "11.0.0" + gb.project = "ceph" + m_gitbuilder_project.return_value = gb + m_get_package_version.return_value = "11.0.0" + install.get_upgrade_version(Mock(), Mock(), Mock()) + + @patch("teuthology.task.install._get_builder_project") + @patch("teuthology.task.install.packaging.get_package_version") + def test_verify_ceph_version_success(self, m_get_package_version, + m_gitbuilder_project): + gb = Mock() + gb.version = "0.89.0" + gb.project = "ceph" + m_gitbuilder_project.return_value = gb + m_get_package_version.return_value = "0.89.0" + config = dict() + install.verify_package_version(Mock(), config, Mock()) + + @patch("teuthology.task.install._get_builder_project") + @patch("teuthology.task.install.packaging.get_package_version") + def test_verify_ceph_version_failed(self, m_get_package_version, + m_gitbuilder_project): + gb = Mock() + gb.version = "0.89.0" + gb.project = "ceph" + m_gitbuilder_project.return_value = gb + m_get_package_version.return_value = "0.89.1" + config = dict() + with pytest.raises(RuntimeError): + install.verify_package_version(Mock(), config, Mock()) + + @patch("teuthology.task.install._get_builder_project") + @patch("teuthology.task.install.packaging.get_package_version") + def test_skip_when_using_ceph_deploy(self, m_get_package_version, + m_gitbuilder_project): + gb = Mock() + gb.version = "0.89.0" + gb.project = "ceph" + 
m_gitbuilder_project.return_value = gb + # ceph isn't installed because ceph-deploy would install it + m_get_package_version.return_value = None + config = dict() + config['extras'] = True + install.verify_package_version(Mock(), config, Mock()) + + def test_get_flavor_default(self): + config = dict() + assert install.get_flavor(config) == 'default' + + def test_get_flavor_simple(self): + config = dict( + flavor='notcmalloc' + ) + assert install.get_flavor(config) == 'notcmalloc' + + def test_get_flavor_valgrind(self): + config = dict( + valgrind=True + ) + assert install.get_flavor(config) == 'notcmalloc' + + def test_upgrade_is_downgrade(self): + assert_ok_vals = [ + ('9.0.0', '10.0.0'), + ('10.2.2-63-g8542898-1trusty', '10.2.2-64-gabcdef1-1trusty'), + ('11.0.0-918.g13c13c7', '11.0.0-2165.gabcdef1') + ] + for t in assert_ok_vals: + assert install._upgrade_is_downgrade(t[0], t[1]) == False + + @patch("teuthology.packaging.get_package_version") + @patch("teuthology.misc.get_system_type") + @patch("teuthology.task.install.verify_package_version") + @patch("teuthology.task.install.get_upgrade_version") + def test_upgrade_common(self, + m_get_upgrade_version, + m_verify_package_version, + m_get_system_type, + m_get_package_version): + expected_system_type = 'deb' + def make_remote(): + remote = Mock() + remote.arch = 'x86_64' + remote.os = Mock() + remote.os.name = 'ubuntu' + remote.os.version = '14.04' + remote.os.codename = 'trusty' + remote.system_type = expected_system_type + return remote + ctx = Mock() + class cluster: + remote1 = make_remote() + remote2 = make_remote() + remotes = { + remote1: ['client.0'], + remote2: ['mon.a','osd.0'], + } + def only(self, role): + result = Mock() + if role in ('client.0',): + result.remotes = { cluster.remote1: None } + if role in ('osd.0', 'mon.a'): + result.remotes = { cluster.remote2: None } + return result + ctx.cluster = cluster() + config = { + 'client.0': { + 'sha1': 'expectedsha1', + }, + } + ctx.config = { + 'roles': [ ['client.0'], ['mon.a','osd.0'] ], + 'tasks': [ + { + 'install.upgrade': config, + }, + ], + } + m_get_upgrade_version.return_value = "11.0.0" + m_get_package_version.return_value = "10.2.4" + m_get_system_type.return_value = "deb" + def upgrade(ctx, node, remote, pkgs, system_type): + assert system_type == expected_system_type + assert install.upgrade_common(ctx, config, upgrade) == 1 + expected_config = { + 'project': 'ceph', + 'sha1': 'expectedsha1', + } + m_verify_package_version.assert_called_with(ctx, + expected_config, + cluster.remote1) + def test_upgrade_remote_to_config(self): + expected_system_type = 'deb' + def make_remote(): + remote = Mock() + remote.arch = 'x86_64' + remote.os = Mock() + remote.os.name = 'ubuntu' + remote.os.version = '14.04' + remote.os.codename = 'trusty' + remote.system_type = expected_system_type + return remote + ctx = Mock() + class cluster: + remote1 = make_remote() + remote2 = make_remote() + remotes = { + remote1: ['client.0'], + remote2: ['mon.a','osd.0'], + } + def only(self, role): + result = Mock() + if role in ('client.0',): + result.remotes = { cluster.remote1: None } + elif role in ('osd.0', 'mon.a'): + result.remotes = { cluster.remote2: None } + else: + result.remotes = None + return result + ctx.cluster = cluster() + ctx.config = { + 'roles': [ ['client.0'], ['mon.a','osd.0'] ], + } + + # nothing -> nothing + assert install.upgrade_remote_to_config(ctx, {}) == {} + + # select the remote for the osd.0 role + # the 'ignored' role does not exist and is ignored + # the remote for 
mon.a is the same as for osd.0 and + # is silently ignored (actually it could be the other + # way around, depending on how the keys are hashed) + config = { + 'osd.0': { + 'sha1': 'expectedsha1', + }, + 'ignored': None, + 'mon.a': { + 'sha1': 'expectedsha1', + }, + } + expected_config = { + cluster.remote2: { + 'project': 'ceph', + 'sha1': 'expectedsha1', + }, + } + assert install.upgrade_remote_to_config(ctx, config) == expected_config + + # select all nodes, regardless + config = { + 'all': { + 'sha1': 'expectedsha1', + }, + } + expected_config = { + cluster.remote1: { + 'project': 'ceph', + 'sha1': 'expectedsha1', + }, + cluster.remote2: { + 'project': 'ceph', + 'sha1': 'expectedsha1', + }, + } + assert install.upgrade_remote_to_config(ctx, config) == expected_config + + # verify that install overrides are used as default + # values for the upgrade task, not as override + ctx.config['overrides'] = { + 'install': { + 'ceph': { + 'sha1': 'overridesha1', + 'tag': 'overridetag', + 'branch': 'overridebranch', + }, + }, + } + config = { + 'client.0': { + 'sha1': 'expectedsha1', + }, + 'osd.0': { + }, + } + expected_config = { + cluster.remote1: { + 'project': 'ceph', + 'sha1': 'expectedsha1', + }, + cluster.remote2: { + 'project': 'ceph', + 'sha1': 'overridesha1', + 'tag': 'overridetag', + 'branch': 'overridebranch', + }, + } + assert install.upgrade_remote_to_config(ctx, config) == expected_config + + + @patch("teuthology.task.install.packaging.get_package_version") + @patch("teuthology.task.install.redhat.set_deb_repo") + def test_rh_install_deb_pkgs(self, m_set_rh_deb_repo, m_get_pkg_version): + ctx = Mock() + remote = Mock() + version = '1.3.2' + rh_ds_yaml = dict() + rh_ds_yaml = { + 'versions': {'deb': {'mapped': {'1.3.2': '0.94.5'}}}, + 'pkgs': {'deb': ['pkg1', 'pkg2']}, + 'extra_system_packages': {'deb': ['es_pkg1', 'es_pkg2']}, + 'extra_packages': {'deb': ['e_pkg1', 'e_pkg2']}, + } + m_get_pkg_version.return_value = "0.94.5" + install.redhat.install_deb_pkgs(ctx, remote, version, rh_ds_yaml) + + @patch("teuthology.task.install.packaging.get_package_version") + def test_rh_install_pkgs(self, m_get_pkg_version): + ctx = Mock() + remote = Mock() + version = '1.3.2' + rh_ds_yaml = dict() + rh_ds_yaml = { + 'versions': {'rpm': {'mapped': {'1.3.2': '0.94.5', + '1.3.1': '0.94.3'}}}, + 'pkgs': {'rpm': ['pkg1', 'pkg2']}, + 'extra_system_packages': {'rpm': ['es_pkg1', 'es_pkg2']}, + 'extra_packages': {'rpm': ['e_pkg1', 'e_pkg2']}, + } + + m_get_pkg_version.return_value = "0.94.5" + install.redhat.install_pkgs(ctx, remote, version, rh_ds_yaml) + version = '1.3.1' + with pytest.raises(RuntimeError) as e: + install.redhat.install_pkgs(ctx, remote, version, rh_ds_yaml) + assert "Version check failed" in str(e) diff --git a/teuthology/test/task/test_internal.py b/teuthology/test/task/test_internal.py new file mode 100644 index 0000000000..4d125f5036 --- /dev/null +++ b/teuthology/test/task/test_internal.py @@ -0,0 +1,57 @@ +from teuthology.config import FakeNamespace +from teuthology.task import internal + + +class TestInternal(object): + def setup(self): + self.ctx = FakeNamespace() + self.ctx.config = dict() + + def test_buildpackages_prep(self): + # + # no buildpackages nor install tasks + # + self.ctx.config = { 'tasks': [] } + assert internal.buildpackages_prep(self.ctx, + self.ctx.config) == internal.BUILDPACKAGES_NOTHING + # + # make the buildpackages tasks the first to run + # + self.ctx.config = { + 'tasks': [ { 'atask': None }, + { 'internal.buildpackages_prep': None }, + { 'btask': None 
}, + { 'install': None }, + { 'buildpackages': None } ], + } + assert internal.buildpackages_prep(self.ctx, + self.ctx.config) == internal.BUILDPACKAGES_FIRST + assert self.ctx.config == { + 'tasks': [ { 'atask': None }, + { 'internal.buildpackages_prep': None }, + { 'buildpackages': None }, + { 'btask': None }, + { 'install': None } ], + } + # + # the buildpackages task already the first task to run + # + assert internal.buildpackages_prep(self.ctx, + self.ctx.config) == internal.BUILDPACKAGES_OK + # + # no buildpackages task + # + self.ctx.config = { + 'tasks': [ { 'install': None } ], + } + assert internal.buildpackages_prep(self.ctx, + self.ctx.config) == internal.BUILDPACKAGES_NOTHING + # + # no install task: the buildpackages task must be removed + # + self.ctx.config = { + 'tasks': [ { 'buildpackages': None } ], + } + assert internal.buildpackages_prep(self.ctx, + self.ctx.config) == internal.BUILDPACKAGES_REMOVED + assert self.ctx.config == {'tasks': []} diff --git a/teuthology/test/task/test_kernel.py b/teuthology/test/task/test_kernel.py new file mode 100644 index 0000000000..7be86587dc --- /dev/null +++ b/teuthology/test/task/test_kernel.py @@ -0,0 +1,243 @@ +from teuthology.config import FakeNamespace +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task.kernel import ( + normalize_and_apply_overrides, + CONFIG_DEFAULT, + TIMEOUT_DEFAULT, +) + +class TestKernelNormalizeAndApplyOverrides(object): + + def setup(self): + self.ctx = FakeNamespace() + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('remote1'), ['mon.a', 'client.0']) + self.ctx.cluster.add(Remote('remote2'), ['osd.0', 'osd.1', 'osd.2']) + self.ctx.cluster.add(Remote('remote3'), ['client.1']) + + def test_default(self): + config = {} + overrides = {} + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'mon.a': CONFIG_DEFAULT, + 'osd.0': CONFIG_DEFAULT, + 'osd.1': CONFIG_DEFAULT, + 'osd.2': CONFIG_DEFAULT, + 'client.0': CONFIG_DEFAULT, + 'client.1': CONFIG_DEFAULT, + } + assert t == TIMEOUT_DEFAULT + + def test_timeout_default(self): + config = { + 'client.0': {'branch': 'testing'}, + } + overrides = {} + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'client.0': {'branch': 'testing'}, + } + assert t == TIMEOUT_DEFAULT + + def test_timeout(self): + config = { + 'client.0': {'branch': 'testing'}, + 'timeout': 100, + } + overrides = {} + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'client.0': {'branch': 'testing'}, + } + assert t == 100 + + def test_override_timeout(self): + config = { + 'client.0': {'branch': 'testing'}, + 'timeout': 100, + } + overrides = { + 'timeout': 200, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'client.0': {'branch': 'testing'}, + } + assert t == 200 + + def test_override_same_version_key(self): + config = { + 'client.0': {'branch': 'testing'}, + } + overrides = { + 'client.0': {'branch': 'wip-foobar'}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'client.0': {'branch': 'wip-foobar'}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_different_version_key(self): + config = { + 'client.0': {'branch': 'testing'}, + } + overrides = { + 'client.0': {'tag': 'v4.1'}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 
'client.0': {'tag': 'v4.1'}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_actual(self): + config = { + 'osd.1': {'tag': 'v4.1'}, + 'client.0': {'branch': 'testing'}, + } + overrides = { + 'osd.1': {'koji': 1234, 'kdb': True}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'osd.1': {'koji': 1234, 'kdb': True}, + 'client.0': {'branch': 'testing'}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_actual_with_generic(self): + config = { + 'osd.1': {'tag': 'v4.1', 'kdb': False}, + 'client.0': {'branch': 'testing'}, + } + overrides = { + 'osd': {'koji': 1234}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'osd.0': {'koji': 1234}, + 'osd.1': {'koji': 1234, 'kdb': False}, + 'osd.2': {'koji': 1234}, + 'client.0': {'branch': 'testing'}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_actual_with_top_level(self): + config = { + 'osd.1': {'tag': 'v4.1'}, + 'client.0': {'branch': 'testing', 'kdb': False}, + } + overrides = {'koji': 1234, 'kdb': True} + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'mon.a': {'koji': 1234, 'kdb': True}, + 'osd.0': {'koji': 1234, 'kdb': True}, + 'osd.1': {'koji': 1234, 'kdb': True}, + 'osd.2': {'koji': 1234, 'kdb': True}, + 'client.0': {'koji': 1234, 'kdb': True}, + 'client.1': {'koji': 1234, 'kdb': True}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_generic(self): + config = { + 'osd': {'tag': 'v4.1'}, + 'client': {'branch': 'testing'}, + } + overrides = { + 'client': {'koji': 1234, 'kdb': True}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'osd.0': {'tag': 'v4.1'}, + 'osd.1': {'tag': 'v4.1'}, + 'osd.2': {'tag': 'v4.1'}, + 'client.0': {'koji': 1234, 'kdb': True}, + 'client.1': {'koji': 1234, 'kdb': True}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_generic_with_top_level(self): + config = { + 'osd': {'tag': 'v4.1'}, + 'client': {'branch': 'testing', 'kdb': False}, + } + overrides = { + 'client': {'koji': 1234}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'osd.0': {'tag': 'v4.1'}, + 'osd.1': {'tag': 'v4.1'}, + 'osd.2': {'tag': 'v4.1'}, + 'client.0': {'koji': 1234, 'kdb': False}, + 'client.1': {'koji': 1234, 'kdb': False}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_generic_with_actual(self): + config = { + 'osd': {'tag': 'v4.1', 'kdb': False}, + 'client': {'branch': 'testing'}, + } + overrides = { + 'osd.2': {'koji': 1234, 'kdb': True}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'osd.0': {'tag': 'v4.1', 'kdb': False}, + 'osd.1': {'tag': 'v4.1', 'kdb': False}, + 'osd.2': {'koji': 1234, 'kdb': True}, + 'client.0': {'branch': 'testing'}, + 'client.1': {'branch': 'testing'}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_top_level(self): + config = {'branch': 'testing'} + overrides = {'koji': 1234, 'kdb': True} + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'mon.a': {'koji': 1234, 'kdb': True}, + 'osd.0': {'koji': 1234, 'kdb': True}, + 'osd.1': {'koji': 1234, 'kdb': True}, + 'osd.2': {'koji': 1234, 'kdb': True}, + 'client.0': {'koji': 1234, 'kdb': True}, + 'client.1': {'koji': 1234, 'kdb': True}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_top_level_with_actual(self): + config = {'branch': 'testing', 'kdb': False} + overrides = { 
+ 'mon.a': {'koji': 1234}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'mon.a': {'koji': 1234, 'kdb': False}, + 'osd.0': {'branch': 'testing', 'kdb': False}, + 'osd.1': {'branch': 'testing', 'kdb': False}, + 'osd.2': {'branch': 'testing', 'kdb': False}, + 'client.0': {'branch': 'testing', 'kdb': False}, + 'client.1': {'branch': 'testing', 'kdb': False}, + } + assert t == TIMEOUT_DEFAULT + + def test_override_top_level_with_generic(self): + config = {'branch': 'testing', 'kdb': False} + overrides = { + 'client': {'koji': 1234, 'kdb': True}, + } + config, t = normalize_and_apply_overrides(self.ctx, config, overrides) + assert config == { + 'mon.a': {'branch': 'testing', 'kdb': False}, + 'osd.0': {'branch': 'testing', 'kdb': False}, + 'osd.1': {'branch': 'testing', 'kdb': False}, + 'osd.2': {'branch': 'testing', 'kdb': False}, + 'client.0': {'koji': 1234, 'kdb': True}, + 'client.1': {'koji': 1234, 'kdb': True}, + } + assert t == TIMEOUT_DEFAULT diff --git a/teuthology/test/task/test_pcp.py b/teuthology/test/task/test_pcp.py new file mode 100644 index 0000000000..c70e544533 --- /dev/null +++ b/teuthology/test/task/test_pcp.py @@ -0,0 +1,379 @@ +import os +import requests + +from teuthology.util.compat import parse_qs, urljoin + +from mock import patch, DEFAULT, Mock, mock_open, call +from pytest import raises + +from teuthology.config import config, FakeNamespace +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.orchestra.run import Raw +from teuthology.task.pcp import (PCPDataSource, PCPArchive, PCPGrapher, + GrafanaGrapher, GraphiteGrapher, PCP) + +from teuthology.test.task import TestTask + +pcp_host = 'http://pcp.front.sepia.ceph.com:44323/' + + +class TestPCPDataSource(object): + klass = PCPDataSource + + def setup(self): + config.pcp_host = pcp_host + + def test_init(self): + hosts = ['host1', 'host2'] + time_from = 'now-2h' + time_until = 'now' + obj = self.klass( + hosts=hosts, + time_from=time_from, + time_until=time_until, + ) + assert obj.hosts == hosts + assert obj.time_from == time_from + assert obj.time_until == time_until + + +class TestPCPArchive(TestPCPDataSource): + klass = PCPArchive + + def test_get_archive_input_dir(self): + hosts = ['host1', 'host2'] + time_from = 'now-1d' + obj = self.klass( + hosts=hosts, + time_from=time_from, + ) + assert obj.get_archive_input_dir('host1') == \ + '/var/log/pcp/pmlogger/host1' + + def test_get_pmlogextract_cmd(self): + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + time_until='now-1h', + ) + expected = [ + 'pmlogextract', + '-S', 'now-3h', + '-T', 'now-1h', + Raw('/var/log/pcp/pmlogger/host1/*.0'), + ] + assert obj.get_pmlogextract_cmd('host1') == expected + + def test_format_time(self): + assert self.klass._format_time(1462893484) == \ + '@ Tue May 10 15:18:04 2016' + + def test_format_time_now(self): + assert self.klass._format_time('now-1h') == 'now-1h' + + +class TestPCPGrapher(TestPCPDataSource): + klass = PCPGrapher + + def test_init(self): + hosts = ['host1', 'host2'] + time_from = 'now-2h' + time_until = 'now' + obj = self.klass( + hosts=hosts, + time_from=time_from, + time_until=time_until, + ) + assert obj.hosts == hosts + assert obj.time_from == time_from + assert obj.time_until == time_until + expected_url = urljoin(config.pcp_host, self.klass._endpoint) + assert obj.base_url == expected_url + + +class TestGrafanaGrapher(TestPCPGrapher): + klass = GrafanaGrapher + + def 
test_build_graph_url(self): + hosts = ['host1'] + time_from = 'now-3h' + time_until = 'now-1h' + obj = self.klass( + hosts=hosts, + time_from=time_from, + time_until=time_until, + ) + base_url = urljoin( + config.pcp_host, + 'grafana/index.html#/dashboard/script/index.js', + ) + assert obj.base_url == base_url + got_url = obj.build_graph_url() + parsed_query = parse_qs(got_url.split('?')[1]) + assert parsed_query['hosts'] == hosts + assert len(parsed_query['time_from']) == 1 + assert parsed_query['time_from'][0] == time_from + assert len(parsed_query['time_to']) == 1 + assert parsed_query['time_to'][0] == time_until + + def test_format_time(self): + assert self.klass._format_time(1462893484) == \ + '2016-05-10T15:18:04' + + def test_format_time_now(self): + assert self.klass._format_time('now-1h') == 'now-1h' + + +class TestGraphiteGrapher(TestPCPGrapher): + klass = GraphiteGrapher + + def test_build_graph_urls(self): + obj = self.klass( + hosts=['host1', 'host2'], + time_from='now-3h', + time_until='now-1h', + ) + expected_urls = [obj.get_graph_url(m) for m in obj.metrics] + obj.build_graph_urls() + built_urls = [] + for metric in obj.graphs.keys(): + built_urls.append(obj.graphs[metric]['url']) + assert len(built_urls) == len(expected_urls) + assert sorted(built_urls) == sorted(expected_urls) + + def test_check_dest_dir(self): + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + ) + assert obj.dest_dir is None + with raises(RuntimeError): + obj._check_dest_dir() + + def test_generate_html_dynamic(self): + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + ) + html = obj.generate_html() + assert config.pcp_host in html + + def test_download_graphs(self): + dest_dir = '/fake/path' + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + dest_dir=dest_dir, + ) + _format = obj.graph_defaults.get('format') + with patch('teuthology.task.pcp.requests.get', create=True) as m_get: + m_resp = Mock() + m_resp.ok = True + m_get.return_value = m_resp + with patch('teuthology.task.pcp.open', mock_open(), create=True): + obj.download_graphs() + expected_filenames = [] + for metric in obj.metrics: + expected_filenames.append( + "{}.{}".format( + os.path.join( + dest_dir, + obj._sanitize_metric_name(metric), + ), + _format, + ) + ) + graph_filenames = [] + for metric in obj.graphs.keys(): + graph_filenames.append(obj.graphs[metric]['file']) + assert sorted(graph_filenames) == sorted(expected_filenames) + + def test_generate_html_static(self): + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + dest_dir='/fake/path', + ) + with patch('teuthology.task.pcp.requests.get', create=True) as m_get: + m_resp = Mock() + m_resp.ok = True + m_get.return_value = m_resp + with patch('teuthology.task.pcp.open', mock_open(), create=True): + obj.download_graphs() + html = obj.generate_html(mode='static') + assert config.pcp_host not in html + + def test_sanitize_metric_name(self): + sanitized_metrics = { + 'foo.bar': 'foo.bar', + 'foo.*': 'foo._all_', + 'foo.bar baz': 'foo.bar_baz', + 'foo.*.bar baz': 'foo._all_.bar_baz', + } + for in_, out in sanitized_metrics.items(): + assert self.klass._sanitize_metric_name(in_) == out + + def test_get_target_globs(self): + obj = self.klass( + hosts=['host1'], + time_from='now-3h', + ) + assert obj.get_target_globs() == ['*host1*'] + assert obj.get_target_globs('a.metric') == ['*host1*.a.metric'] + obj.hosts.append('host2') + assert obj.get_target_globs() == ['*host1*', '*host2*'] + assert obj.get_target_globs('a.metric') == \ + 
['*host1*.a.metric', '*host2*.a.metric'] + + +class TestPCPTask(TestTask): + klass = PCP + task_name = 'pcp' + + def setup(self): + self.ctx = FakeNamespace() + self.ctx.cluster = Cluster() + self.ctx.cluster.add(Remote('user@remote1'), ['role1']) + self.ctx.cluster.add(Remote('user@remote2'), ['role2']) + self.ctx.config = dict() + self.task_config = dict() + config.pcp_host = pcp_host + + def test_init(self): + task = self.klass(self.ctx, self.task_config) + assert task.stop_time == 'now' + + def test_disabled(self): + config.pcp_host = None + with self.klass(self.ctx, self.task_config) as task: + assert task.enabled is False + assert not hasattr(task, 'grafana') + assert not hasattr(task, 'graphite') + assert not hasattr(task, 'archiver') + + def test_setup(self): + with patch.multiple( + self.klass, + setup_collectors=DEFAULT, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task.setup_collectors.assert_called_once_with() + assert isinstance(task.start_time, int) + + def test_setup_collectors(self): + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + assert hasattr(task, 'grafana') + assert not hasattr(task, 'graphite') + assert not hasattr(task, 'archiver') + self.task_config['grafana'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'grafana') + + @patch('os.makedirs') + def test_setup_grafana(self, m_makedirs): + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + self.ctx.archive = '/fake/path' + with self.klass(self.ctx, self.task_config) as task: + assert hasattr(task, 'grafana') + self.task_config['grafana'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'grafana') + + @patch('os.makedirs') + @patch('teuthology.task.pcp.GraphiteGrapher') + def test_setup_graphite(self, m_graphite_grapher, m_makedirs): + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'graphite') + self.task_config['graphite'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'graphite') + self.ctx.archive = '/fake/path' + self.task_config['graphite'] = True + with self.klass(self.ctx, self.task_config) as task: + assert hasattr(task, 'graphite') + self.task_config['graphite'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'graphite') + + @patch('os.makedirs') + @patch('teuthology.task.pcp.PCPArchive') + def test_setup_archiver(self, m_archive, m_makedirs): + with patch.multiple( + self.klass, + begin=DEFAULT, + end=DEFAULT, + ): + self.task_config['fetch_archives'] = True + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'archiver') + self.task_config['fetch_archives'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'archiver') + self.ctx.archive = '/fake/path' + self.task_config['fetch_archives'] = True + with self.klass(self.ctx, self.task_config) as task: + assert hasattr(task, 'archiver') + self.task_config['fetch_archives'] = False + with self.klass(self.ctx, self.task_config) as task: + assert not hasattr(task, 'archiver') + + @patch('os.makedirs') + @patch('teuthology.task.pcp.GrafanaGrapher') + @patch('teuthology.task.pcp.GraphiteGrapher') + def test_begin(self, m_grafana, m_graphite, m_makedirs): + with patch.multiple( 
+ self.klass, + end=DEFAULT, + ): + with self.klass(self.ctx, self.task_config) as task: + task.grafana.build_graph_url.assert_called_once_with() + self.task_config['graphite'] = True + self.ctx.archive = '/fake/path' + with self.klass(self.ctx, self.task_config) as task: + task.graphite.write_html.assert_called_once_with() + + @patch('os.makedirs') + @patch('teuthology.task.pcp.GrafanaGrapher') + @patch('teuthology.task.pcp.GraphiteGrapher') + def test_end(self, m_grafana, m_graphite, m_makedirs): + self.ctx.archive = '/fake/path' + with self.klass(self.ctx, self.task_config) as task: + # begin() should have called write_html() once by now, with no args + task.graphite.write_html.assert_called_once_with() + # end() should have called write_html() a second time by now, with + # mode=static + second_call = task.graphite.write_html.call_args_list[1] + assert second_call[1]['mode'] == 'static' + assert isinstance(task.stop_time, int) + + @patch('os.makedirs') + @patch('teuthology.task.pcp.GrafanaGrapher') + @patch('teuthology.task.pcp.GraphiteGrapher') + def test_end_16049(self, m_grafana, m_graphite, m_makedirs): + # http://tracker.ceph.com/issues/16049 + # Jobs were failing if graph downloading failed. We don't want that. + self.ctx.archive = '/fake/path' + with self.klass(self.ctx, self.task_config) as task: + task.graphite.download_graphs.side_effect = \ + requests.ConnectionError + # Even though downloading graphs failed, we should have called + # write_html() a second time, again with no args + assert task.graphite.write_html.call_args_list == [call(), call()] + assert isinstance(task.stop_time, int) diff --git a/teuthology/test/task/test_selinux.py b/teuthology/test/task/test_selinux.py new file mode 100644 index 0000000000..ddbff06d08 --- /dev/null +++ b/teuthology/test/task/test_selinux.py @@ -0,0 +1,35 @@ +from mock import patch, Mock, DEFAULT + +from teuthology.config import FakeNamespace +from teuthology.orchestra.cluster import Cluster +from teuthology.orchestra.remote import Remote +from teuthology.task.selinux import SELinux + + +class TestSELinux(object): + def setup(self): + self.ctx = FakeNamespace() + self.ctx.config = dict() + + def test_host_exclusion(self): + with patch.multiple( + Remote, + os=DEFAULT, + run=DEFAULT, + ): + self.ctx.cluster = Cluster() + remote1 = Remote('remote1') + remote1.os = Mock() + remote1.os.package_type = 'rpm' + remote1._is_vm = False + self.ctx.cluster.add(remote1, ['role1']) + remote2 = Remote('remote1') + remote2.os = Mock() + remote2.os.package_type = 'deb' + remote2._is_vm = False + self.ctx.cluster.add(remote2, ['role2']) + task_config = dict() + with SELinux(self.ctx, task_config) as task: + remotes = list(task.cluster.remotes) + assert remotes == [remote1] + diff --git a/teuthology/test/test_config.py b/teuthology/test/test_config.py new file mode 100644 index 0000000000..cbf7c3ecfb --- /dev/null +++ b/teuthology/test/test_config.py @@ -0,0 +1,189 @@ +import pytest + +from teuthology import config + + +class TestYamlConfig(object): + def setup(self): + self.test_class = config.YamlConfig + + def test_set_multiple(self): + conf_obj = self.test_class() + conf_obj.foo = 'foo' + conf_obj.bar = 'bar' + assert conf_obj.foo == 'foo' + assert conf_obj.bar == 'bar' + assert conf_obj.to_dict()['foo'] == 'foo' + + def test_from_dict(self): + in_dict = dict(foo='bar') + conf_obj = self.test_class.from_dict(in_dict) + assert conf_obj.foo == 'bar' + + def test_contains(self): + in_dict = dict(foo='bar') + conf_obj = 
self.test_class.from_dict(in_dict) + conf_obj.bar = "foo" + assert "bar" in conf_obj + assert "foo" in conf_obj + assert "baz" not in conf_obj + + def test_to_dict(self): + in_dict = dict(foo='bar') + conf_obj = self.test_class.from_dict(in_dict) + assert conf_obj.to_dict() == in_dict + + def test_from_str(self): + in_str = "foo: bar" + conf_obj = self.test_class.from_str(in_str) + assert conf_obj.foo == 'bar' + + def test_to_str(self): + in_str = "foo: bar" + conf_obj = self.test_class.from_str(in_str) + assert conf_obj.to_str() == in_str + + def test_update(self): + conf_obj = self.test_class(dict()) + conf_obj.foo = 'foo' + conf_obj.bar = 'bar' + conf_obj.update(dict(bar='baz')) + assert conf_obj.foo == 'foo' + assert conf_obj.bar == 'baz' + + def test_delattr(self): + conf_obj = self.test_class() + conf_obj.foo = 'bar' + assert conf_obj.foo == 'bar' + del conf_obj.foo + assert conf_obj.foo is None + + def test_assignment(self): + conf_obj = self.test_class() + conf_obj["foo"] = "bar" + assert conf_obj["foo"] == "bar" + assert conf_obj.foo == "bar" + + def test_used_with_update(self): + d = dict() + conf_obj = self.test_class.from_dict({"foo": "bar"}) + d.update(conf_obj) + assert d["foo"] == "bar" + + def test_get(self): + conf_obj = self.test_class() + assert conf_obj.get('foo') is None + assert conf_obj.get('foo', 'bar') == 'bar' + conf_obj.foo = 'baz' + assert conf_obj.get('foo') == 'baz' + + +class TestTeuthologyConfig(TestYamlConfig): + def setup(self): + self.test_class = config.TeuthologyConfig + + def test_get_ceph_git_base_default(self): + conf_obj = self.test_class() + conf_obj.yaml_path = '' + conf_obj.load() + assert conf_obj.ceph_git_base_url == "https://github.com/ceph/" + + def test_set_ceph_git_base_via_private(self): + conf_obj = self.test_class() + conf_obj._conf['ceph_git_base_url'] = \ + "git://git.ceph.com/" + assert conf_obj.ceph_git_base_url == "git://git.ceph.com/" + + def test_get_reserve_machines_default(self): + conf_obj = self.test_class() + conf_obj.yaml_path = '' + conf_obj.load() + assert conf_obj.reserve_machines == 5 + + def test_set_reserve_machines_via_private(self): + conf_obj = self.test_class() + conf_obj._conf['reserve_machines'] = 2 + assert conf_obj.reserve_machines == 2 + + def test_set_nonstandard(self): + conf_obj = self.test_class() + conf_obj.something = 'something else' + assert conf_obj.something == 'something else' + + +class TestJobConfig(TestYamlConfig): + def setup(self): + self.test_class = config.JobConfig + + +class TestFakeNamespace(TestYamlConfig): + def setup(self): + self.test_class = config.FakeNamespace + + def test_docopt_dict(self): + """ + Tests if a dict in the format that docopt returns can + be parsed correctly. 
+ """ + d = { + "--verbose": True, + "--an-option": "some_option", + "": "the_arg", + "something": "some_thing", + } + conf_obj = self.test_class(d) + assert conf_obj.verbose + assert conf_obj.an_option == "some_option" + assert conf_obj.an_arg == "the_arg" + assert conf_obj.something == "some_thing" + + def test_config(self): + """ + Tests that a teuthology_config property is automatically added + to the conf_obj + """ + conf_obj = self.test_class(dict(foo="bar")) + assert conf_obj["foo"] == "bar" + assert conf_obj.foo == "bar" + assert conf_obj.teuthology_config.get("fake key") is None + + def test_getattr(self): + conf_obj = self.test_class.from_dict({"foo": "bar"}) + result = getattr(conf_obj, "not_there", "default") + assert result == "default" + result = getattr(conf_obj, "foo") + assert result == "bar" + + def test_none(self): + conf_obj = self.test_class.from_dict(dict(null=None)) + assert conf_obj.null is None + + def test_delattr(self): + conf_obj = self.test_class() + conf_obj.foo = 'bar' + assert conf_obj.foo == 'bar' + del conf_obj.foo + with pytest.raises(AttributeError): + conf_obj.foo + + def test_to_str(self): + in_str = "foo: bar" + conf_obj = self.test_class.from_str(in_str) + assert conf_obj.to_str() == "{'foo': 'bar'}" + + def test_multiple_access(self): + """ + Test that config.config and FakeNamespace.teuthology_config reflect + each others' modifications + """ + in_str = "foo: bar" + conf_obj = self.test_class.from_str(in_str) + assert config.config.get('test_key_1') is None + assert conf_obj.teuthology_config.get('test_key_1') is None + config.config.test_key_1 = 'test value' + assert conf_obj.teuthology_config['test_key_1'] == 'test value' + + assert config.config.get('test_key_2') is None + assert conf_obj.teuthology_config.get('test_key_2') is None + conf_obj.teuthology_config['test_key_2'] = 'test value' + assert config.config['test_key_2'] == 'test value' diff --git a/teuthology/test/test_contextutil.py b/teuthology/test/test_contextutil.py new file mode 100644 index 0000000000..d7d1f4323f --- /dev/null +++ b/teuthology/test/test_contextutil.py @@ -0,0 +1,68 @@ +from pytest import raises +from teuthology import contextutil +from logging import ERROR + + +class TestSafeWhile(object): + + def setup(self): + contextutil.log.setLevel(ERROR) + self.fake_sleep = lambda s: True + self.s_while = contextutil.safe_while + + def test_6_5_10_deal(self): + with raises(contextutil.MaxWhileTries): + with self.s_while(_sleeper=self.fake_sleep) as proceed: + while proceed(): + pass + + def test_6_0_1_deal(self): + with raises(contextutil.MaxWhileTries) as error: + with self.s_while( + tries=1, + _sleeper=self.fake_sleep + ) as proceed: + while proceed(): + pass + + assert 'waiting for 6 seconds' in str(error) + + def test_1_0_10_deal(self): + with raises(contextutil.MaxWhileTries) as error: + with self.s_while( + sleep=1, + _sleeper=self.fake_sleep + ) as proceed: + while proceed(): + pass + + assert 'waiting for 10 seconds' in str(error) + + def test_6_1_10_deal(self): + with raises(contextutil.MaxWhileTries) as error: + with self.s_while( + increment=1, + _sleeper=self.fake_sleep + ) as proceed: + while proceed(): + pass + + assert 'waiting for 105 seconds' in str(error) + + def test_action(self): + with raises(contextutil.MaxWhileTries) as error: + with self.s_while( + action='doing the thing', + _sleeper=self.fake_sleep + ) as proceed: + while proceed(): + pass + + assert "'doing the thing' reached maximum tries" in str(error) + + def test_no_raise(self): + with 
self.s_while(_raise=False, _sleeper=self.fake_sleep) as proceed: + while proceed(): + pass + + assert True diff --git a/teuthology/test/test_describe_tests.py b/teuthology/test/test_describe_tests.py new file mode 100644 index 0000000000..c6577d66f3 --- /dev/null +++ b/teuthology/test/test_describe_tests.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +import pytest + +from teuthology.test.fake_fs import make_fake_fstools +from teuthology.describe_tests import (tree_with_info, extract_info, + get_combinations) +from teuthology.exceptions import ParseError +from mock import MagicMock, patch + +realistic_fs = { + 'basic': { + '%': None, + 'base': { + 'install.yaml': + """meta: +- desc: install ceph +install: +""" + }, + 'clusters': { + 'fixed-1.yaml': + """meta: +- desc: single node cluster +roles: +- [osd.0, osd.1, osd.2, mon.a, mon.b, mon.c, client.0] +""", + 'fixed-2.yaml': + """meta: +- desc: couple node cluster +roles: +- [osd.0, osd.1, osd.2, mon.a, mon.b, mon.c] +- [client.0] +""", + 'fixed-3.yaml': + """meta: +- desc: triple node cluster +roles: +- [osd.0, osd.1, osd.2, mon.a, mon.b, mon.c] +- [client.0] +- [client.1] +""" + }, + 'workloads': { + 'rbd_api_tests_old_format.yaml': + """meta: +- desc: c/c++ librbd api tests with format 1 images + rbd_features: none +overrides: + ceph: + conf: + client: + rbd default format: 1 +tasks: +- workunit: + env: + RBD_FEATURES: 0 + clients: + client.0: + - rbd/test_librbd.sh +""", + 'rbd_api_tests.yaml': + """meta: +- desc: c/c++ librbd api tests with default settings + rbd_features: default +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh +""", + }, + }, +} + + +expected_tree = """├── % +├── base +│ └── install.yaml +├── clusters +│ ├── fixed-1.yaml +│ ├── fixed-2.yaml +│ └── fixed-3.yaml +└── workloads + ├── rbd_api_tests.yaml + └── rbd_api_tests_old_format.yaml""".split('\n') + + +expected_facets = [ + '', + '', + 'base', + '', + 'clusters', + 'clusters', + 'clusters', + '', + 'workloads', + 'workloads', +] + + +expected_desc = [ + '', + '', + 'install ceph', + '', + 'single node cluster', + 'couple node cluster', + 'triple node cluster', + '', + 'c/c++ librbd api tests with default settings', + 'c/c++ librbd api tests with format 1 images', +] + + +expected_rbd_features = [ + '', + '', + '', + '', + '', + '', + '', + '', + 'default', + 'none', +] + + +class TestDescribeTests(object): + + def setup(self): + self.mocks = dict() + self.patchers = dict() + exists, listdir, isfile, isdir, open = make_fake_fstools(realistic_fs) + for ppoint, fn in { + 'os.listdir': listdir, + 'os.path.isdir': isdir, + 'teuthology.describe_tests.open': open, + 'builtins.open': open, + 'os.path.exists': exists, + 'os.listdir': listdir, + 'os.path.isfile': isfile, + }.items(): + mockobj = MagicMock() + patcher = patch(ppoint, mockobj) + mockobj.side_effect = fn + patcher.start() + self.mocks[ppoint] = mockobj + self.patchers[ppoint] = patcher + + def stop_patchers(self): + for patcher in self.patchers.values(): + patcher.stop() + + def teardown(self): + self.stop_patchers() + + @staticmethod + def assert_expected_combo_headers(headers): + assert headers == (['subsuite depth 0'] + + sorted(set(filter(bool, expected_facets)))) + + def test_no_filters(self): + rows = tree_with_info('basic', [], False, '', []) + assert rows == [[x] for x in expected_tree] + + def test_single_filter(self): + rows = tree_with_info('basic', ['desc'], False, '', []) + assert rows == [list(_) for _ in zip(expected_tree, expected_desc)] + + rows = tree_with_info('basic', 
['rbd_features'], False, '', []) + assert rows == [list(_) for _ in zip(expected_tree, expected_rbd_features)] + + def test_single_filter_with_facets(self): + rows = tree_with_info('basic', ['desc'], True, '', []) + assert rows == [list(_) for _ in zip(expected_tree, expected_facets, + expected_desc)] + + rows = tree_with_info('basic', ['rbd_features'], True, '', []) + assert rows == [list(_) for _ in zip(expected_tree, expected_facets, + expected_rbd_features)] + + def test_no_matching(self): + rows = tree_with_info('basic', ['extra'], False, '', []) + assert rows == [list(_) for _ in zip(expected_tree, [''] * len(expected_tree))] + + rows = tree_with_info('basic', ['extra'], True, '', []) + assert rows == [list(_) for _ in zip(expected_tree, expected_facets, + [''] * len(expected_tree))] + + def test_multiple_filters(self): + rows = tree_with_info('basic', ['desc', 'rbd_features'], False, '', []) + assert rows == [list(_) for _ in zip(expected_tree, + expected_desc, + expected_rbd_features)] + + rows = tree_with_info('basic', ['rbd_features', 'desc'], False, '', []) + assert rows == [list(_) for _ in zip(expected_tree, + expected_rbd_features, + expected_desc)] + + def test_multiple_filters_with_facets(self): + rows = tree_with_info('basic', ['desc', 'rbd_features'], True, '', []) + assert rows == [list(_) for _ in zip(expected_tree, + expected_facets, + expected_desc, + expected_rbd_features)] + + rows = tree_with_info('basic', ['rbd_features', 'desc'], True, '', []) + assert rows == [list(_) for _ in zip(expected_tree, + expected_facets, + expected_rbd_features, + expected_desc)] + + def test_combinations_only_facets(self): + headers, rows = get_combinations('basic', + fields=[], subset=None, limit=1, + filter_in=None, filter_out=None, filter_all=None, + include_facet=True) + self.assert_expected_combo_headers(headers) + assert rows == [['basic', 'install', 'fixed-1', 'rbd_api_tests']] + + def test_combinations_desc_features(self): + headers, rows = get_combinations('basic', + fields=['desc', 'rbd_features'], subset=None, limit=1, + filter_in=None, filter_out=None, filter_all=None, + include_facet=False) + assert headers == ['desc', 'rbd_features'] + descriptions = '\n'.join([ + 'install ceph', + 'single node cluster', + 'c/c++ librbd api tests with default settings', + ]) + assert rows == [[descriptions, 'default']] + + def test_combinations_filter_in(self): + headers, rows = get_combinations('basic', + fields=[], subset=None, limit=0, + filter_in=['old_format'], filter_out=None, filter_all=None, + include_facet=True) + self.assert_expected_combo_headers(headers) + assert rows == [ + ['basic', 'install', 'fixed-1', 'rbd_api_tests_old_format'], + ['basic', 'install', 'fixed-2', 'rbd_api_tests_old_format'], + ['basic', 'install', 'fixed-3', 'rbd_api_tests_old_format'], + ] + + def test_combinations_filter_out(self): + headers, rows = get_combinations('basic', + fields=[], subset=None, limit=0, + filter_in=None, filter_out=['old_format'], filter_all=None, + include_facet=True) + self.assert_expected_combo_headers(headers) + assert rows == [ + ['basic', 'install', 'fixed-1', 'rbd_api_tests'], + ['basic', 'install', 'fixed-2', 'rbd_api_tests'], + ['basic', 'install', 'fixed-3', 'rbd_api_tests'], + ] + + def test_combinations_filter_all(self): + headers, rows = get_combinations('basic', + fields=[], subset=None, limit=0, + filter_in=None, filter_out=None, + filter_all=['fixed-2', 'old_format'], + include_facet=True) + self.assert_expected_combo_headers(headers) + assert rows == [ + 
['basic', 'install', 'fixed-2', 'rbd_api_tests_old_format'] + ] + + +@patch('teuthology.describe_tests.open') +@patch('os.path.isdir') +def test_extract_info_dir(m_isdir, m_open): + simple_fs = {'a': {'b.yaml': 'meta: [{foo: c}]'}} + _, _, _, m_isdir.side_effect, m_open.side_effect = \ + make_fake_fstools(simple_fs) + info = extract_info('a', []) + assert info == {} + + info = extract_info('a', ['foo', 'bar']) + assert info == {'foo': '', 'bar': ''} + + info = extract_info('a/b.yaml', ['foo', 'bar']) + assert info == {'foo': 'c', 'bar': ''} + + +@patch('teuthology.describe_tests.open') +@patch('os.path.isdir') +def check_parse_error(fs, m_isdir, m_open): + _, _, _, m_isdir.side_effect, m_open.side_effect = make_fake_fstools(fs) + with pytest.raises(ParseError): + a = extract_info('a.yaml', ['a']) + raise Exception(str(a)) + + +def test_extract_info_too_many_elements(): + check_parse_error({'a.yaml': 'meta: [{a: b}, {b: c}]'}) + + +def test_extract_info_not_a_list(): + check_parse_error({'a.yaml': 'meta: {a: b}'}) + + +def test_extract_info_not_a_dict(): + check_parse_error({'a.yaml': 'meta: [[a, b]]'}) + + +@patch('teuthology.describe_tests.open') +@patch('os.path.isdir') +def test_extract_info_empty_file(m_isdir, m_open): + simple_fs = {'a.yaml': ''} + _, _, _, m_isdir.side_effect, m_open.side_effect = \ + make_fake_fstools(simple_fs) + info = extract_info('a.yaml', []) + assert info == {} diff --git a/teuthology/test/test_email_sleep_before_teardown.py b/teuthology/test/test_email_sleep_before_teardown.py new file mode 100644 index 0000000000..fcc83b697d --- /dev/null +++ b/teuthology/test/test_email_sleep_before_teardown.py @@ -0,0 +1,81 @@ +from humanfriendly import format_timespan +from mock import Mock, patch +from pytest import mark +from teuthology.config import config +from teuthology.run_tasks import build_email_body as email_body +from textwrap import dedent + +class TestSleepBeforeTeardownEmail(object): + def setup(self): + config.results_ui_server = "http://example.com/" + config.archive_server = "http://qa-proxy.ceph.com/teuthology/" + + @mark.parametrize( + ['status', 'owner', 'suite_name', 'run_name', 'job_id', 'dura'], + [ + [ + 'pass', + 'noreply@host', + 'dummy', + 'run-name', + 123, + 3600, + ], + [ + 'fail', + 'noname', + 'yummy', + 'next-run', + 1000, + 99999, + ], + ] + ) + @patch("teuthology.run_tasks.time.time") + def test_sleep_before_teardown_email_body(self, m_time, status, owner, + suite_name, run_name, job_id, dura): + ctx = Mock() + archive_path='archive/path' + archive_dir='/archive/dir' + date_sec=3661 + date_str='1970-01-01 01:01:01' + m_time.return_value=float(date_sec) + duration_sec=dura + duration_str=format_timespan(duration_sec) + ref_body=dedent(""" + Teuthology job {run}/{job} has fallen asleep at {date} for {duration_str} + + Owner: {owner} + Suite Name: {suite} + Sleep Date: {date} + Sleep Time: {duration_sec} seconds ({duration_str}) + Job Info: http://example.com/{run}/ + Job Logs: http://qa-proxy.ceph.com/teuthology/path/{job}/ + Task Stack: a/b/c + Current Status: {status}""" + .format(duration_sec=duration_sec, duration_str=duration_str, + owner=owner, suite=suite_name, run=run_name, + job=job_id, status=status, date=date_str)) + print(ref_body) + ctx.config = dict( + archive_path=archive_path, + job_id=job_id, + suite=suite_name, + ) + if status == 'pass': + ctx.summary = dict( + success=True, + ) + elif status == 'fail': + ctx.summary = dict( + success=False, + ) + else: + ctx.summary = dict() + + ctx.owner = owner + ctx.name = run_name + 
ctx.archive_dir = archive_dir + tasks = [('a', None), ('b', None), ('c', None)] + (subj, body) = email_body(ctx, tasks, dura) + assert body == ref_body.lstrip('\n') diff --git a/teuthology/test/test_exit.py b/teuthology/test/test_exit.py new file mode 100644 index 0000000000..b7004fd757 --- /dev/null +++ b/teuthology/test/test_exit.py @@ -0,0 +1,99 @@ +import os +import random +import signal + +from unittest.mock import patch, Mock + +from teuthology import exit +from teuthology.test import skipif_teuthology_process + + +class TestExiter(object): + klass = exit.Exiter + + def setup(self): + self.pid = os.getpid() + + # Below, we patch os.kill() in such a way that the first time it is + # invoked it does actually send the signal. Any subsequent invocation + # won't send any signal - this is so we don't kill the process running + # our unit tests! + self.patcher_kill = patch( + 'teuthology.exit.os.kill', + wraps=os.kill, + ) + + #Keep a copy of the unpatched kill and call this in place of os.kill + #In the Exiter objects, the os.kill calls are patched. + #So the call_count should be 1. + self.kill_unpatched = os.kill + self.m_kill = self.patcher_kill.start() + + def m_kill_unwrap(pid, sig): + # Setting return_value of a mocked object disables the wrapping + if self.m_kill.call_count > 1: + self.m_kill.return_value = None + + self.m_kill.side_effect = m_kill_unwrap + + def teardown(self): + self.patcher_kill.stop() + del self.m_kill + + @skipif_teuthology_process + def test_noop(self): + sig = 15 + obj = self.klass() + assert len(obj.handlers) == 0 + assert signal.getsignal(sig) == 0 + + def test_basic(self): + sig = 15 + obj = self.klass() + m_func = Mock() + obj.add_handler(sig, m_func) + assert len(obj.handlers) == 1 + self.kill_unpatched(self.pid, sig) + assert m_func.call_count == 1 + assert self.m_kill.call_count == 1 + for arg_list in self.m_kill.call_args_list: + assert arg_list[0] == (self.pid, sig) + + def test_remove_handlers(self): + sig = [1, 15] + send_sig = random.choice(sig) + n = 3 + obj = self.klass() + handlers = list() + for i in range(n): + m_func = Mock(name="handler %s" % i) + handlers.append(obj.add_handler(sig, m_func)) + assert obj.handlers == handlers + for handler in handlers: + handler.remove() + assert obj.handlers == list() + self.kill_unpatched(self.pid, send_sig) + assert self.m_kill.call_count == 1 + for handler in handlers: + assert handler.func.call_count == 0 + + def test_n_handlers(self, n=10, sig=11): + if isinstance(sig, int): + send_sig = sig + else: + send_sig = random.choice(sig) + obj = self.klass() + handlers = list() + for i in range(n): + m_func = Mock(name="handler %s" % i) + handlers.append(obj.add_handler(sig, m_func)) + assert obj.handlers == handlers + self.kill_unpatched(self.pid, send_sig) + for i in range(n): + assert handlers[i].func.call_count == 1 + assert self.m_kill.call_count == 1 + for arg_list in self.m_kill.call_args_list: + assert arg_list[0] == (self.pid, send_sig) + + def test_multiple_signals(self): + self.test_n_handlers(n=3, sig=[1, 6, 11, 15]) diff --git a/teuthology/test/test_get_distro.py b/teuthology/test/test_get_distro.py new file mode 100644 index 0000000000..71104d39b4 --- /dev/null +++ b/teuthology/test/test_get_distro.py @@ -0,0 +1,47 @@ +from teuthology.misc import get_distro + + +class Mock: + pass + + +class TestGetDistro(object): + + def setup(self): + self.fake_ctx = Mock() + self.fake_ctx.config = {} + # os_type in ctx will always default to None + self.fake_ctx.os_type = None + + def 
test_default_distro(self): + distro = get_distro(self.fake_ctx) + assert distro == 'ubuntu' + + def test_argument(self): + # we don't want fake_ctx to have a config + self.fake_ctx = Mock() + self.fake_ctx.os_type = 'centos' + distro = get_distro(self.fake_ctx) + assert distro == 'centos' + + def test_teuth_config(self): + self.fake_ctx.config = {'os_type': 'fedora'} + distro = get_distro(self.fake_ctx) + assert distro == 'fedora' + + def test_argument_takes_precedence(self): + self.fake_ctx.config = {'os_type': 'fedora'} + self.fake_ctx.os_type = "centos" + distro = get_distro(self.fake_ctx) + assert distro == 'centos' + + def test_no_config_or_os_type(self): + self.fake_ctx = Mock() + self.fake_ctx.os_type = None + distro = get_distro(self.fake_ctx) + assert distro == 'ubuntu' + + def test_config_os_type_is_none(self): + self.fake_ctx.config["os_type"] = None + distro = get_distro(self.fake_ctx) + assert distro == 'ubuntu' diff --git a/teuthology/test/test_get_distro_version.py b/teuthology/test/test_get_distro_version.py new file mode 100644 index 0000000000..3adde92899 --- /dev/null +++ b/teuthology/test/test_get_distro_version.py @@ -0,0 +1,47 @@ +from teuthology.misc import get_distro_version + + +class Mock: + pass + + +class TestGetDistroVersion(object): + + def setup(self): + self.fake_ctx = Mock() + self.fake_ctx.config = {} + self.fake_ctx_noarg = Mock() + self.fake_ctx_noarg.config = {} + self.fake_ctx_noarg.os_version = None + self.fake_ctx.os_type = None + self.fake_ctx_noarg.os_type = None + + def test_default_distro_version(self): + # Default distro is ubuntu, default version of ubuntu is 20.04 + self.fake_ctx.os_version = None + distroversion = get_distro_version(self.fake_ctx) + assert distroversion == '20.04' + + def test_argument_version(self): + self.fake_ctx.os_version = '13.04' + distroversion = get_distro_version(self.fake_ctx) + assert distroversion == '13.04' + + def test_teuth_config_version(self): + # The argument takes precedence over the config.
+ self.fake_ctx.os_version = '13.04' + self.fake_ctx.config = {'os_version': '13.10'} + distroversion = get_distro_version(self.fake_ctx) + assert distroversion == '13.04' + + def test_teuth_config_noarg_version(self): + self.fake_ctx_noarg.config = {'os_version': '13.04'} + distroversion = get_distro_version(self.fake_ctx_noarg) + assert distroversion == '13.04' + + def test_no_teuth_config(self): + self.fake_ctx = Mock() + self.fake_ctx.os_type = None + self.fake_ctx.os_version = '13.04' + distroversion = get_distro_version(self.fake_ctx) + assert distroversion == '13.04' diff --git a/teuthology/test/test_get_multi_machine_types.py b/teuthology/test/test_get_multi_machine_types.py new file mode 100644 index 0000000000..32a6b0263d --- /dev/null +++ b/teuthology/test/test_get_multi_machine_types.py @@ -0,0 +1,27 @@ +from teuthology import misc as teuthology + +class Mock: pass + +class TestGetMultiMachineTypes(object): + + def test_space(self): + give = 'burnupi plana vps' + expect = ['burnupi','plana','vps'] + assert teuthology.get_multi_machine_types(give) == expect + + def test_tab(self): + give = 'burnupi plana vps' + expect = ['burnupi','plana','vps'] + assert teuthology.get_multi_machine_types(give) == expect + + def test_comma(self): + give = 'burnupi,plana,vps' + expect = ['burnupi','plana','vps'] + assert teuthology.get_multi_machine_types(give) == expect + + def test_single(self): + give = 'burnupi' + expect = ['burnupi'] + assert teuthology.get_multi_machine_types(give) == expect + + diff --git a/teuthology/test/test_job_status.py b/teuthology/test/test_job_status.py new file mode 100644 index 0000000000..ee1b764e4f --- /dev/null +++ b/teuthology/test/test_job_status.py @@ -0,0 +1,60 @@ +from teuthology import job_status + + +class TestJobStatus(object): + def test_get_only_success_true(self): + summary = dict(success=True) + status = job_status.get_status(summary) + assert status == 'pass' + + def test_get_only_success_false(self): + summary = dict(success=False) + status = job_status.get_status(summary) + assert status == 'fail' + + def test_get_status_pass(self): + summary = dict(status='pass') + status = job_status.get_status(summary) + assert status == 'pass' + + def test_get_status_fail(self): + summary = dict(status='fail') + status = job_status.get_status(summary) + assert status == 'fail' + + def test_get_status_dead(self): + summary = dict(status='dead') + status = job_status.get_status(summary) + assert status == 'dead' + + def test_get_status_none(self): + summary = dict() + status = job_status.get_status(summary) + assert status is None + + def test_set_status_pass(self): + summary = dict() + job_status.set_status(summary, 'pass') + assert summary == dict(status='pass', success=True) + + def test_set_status_dead(self): + summary = dict() + job_status.set_status(summary, 'dead') + assert summary == dict(status='dead', success=False) + + def test_set_then_get_status_dead(self): + summary = dict() + job_status.set_status(summary, 'dead') + status = job_status.get_status(summary) + assert status == 'dead' + + def test_set_status_none(self): + summary = dict() + job_status.set_status(summary, None) + assert summary == dict() + + def test_legacy_fail(self): + summary = dict(success=True) + summary['success'] = False + status = job_status.get_status(summary) + assert status == 'fail' diff --git a/teuthology/test/test_ls.py b/teuthology/test/test_ls.py new file mode 100644 index 0000000000..631dcfd46f --- /dev/null +++ b/teuthology/test/test_ls.py @@ -0,0 +1,48 @@ +import 
pytest + +from unittest.mock import patch, Mock + +from teuthology import ls + + +class TestLs(object): + """ Tests for teuthology.ls """ + + @patch('os.path.isdir') + @patch('os.listdir') + def test_get_jobs(self, m_listdir, m_isdir): + m_listdir.return_value = ["1", "a", "3"] + m_isdir.return_value = True + results = ls.get_jobs("some/archive/dir") + assert results == ["1", "3"] + + @patch("yaml.safe_load_all") + @patch("teuthology.ls.get_jobs") + def test_ls(self, m_get_jobs, m_safe_load_all): + m_get_jobs.return_value = ["1", "2"] + m_safe_load_all.return_value = [{"failure_reason": "reasons"}] + ls.ls("some/archive/div", True) + + @patch("teuthology.ls.open") + @patch("teuthology.ls.get_jobs") + def test_ls_ioerror(self, m_get_jobs, m_open): + m_get_jobs.return_value = ["1", "2"] + m_open.side_effect = IOError() + with pytest.raises(IOError): + ls.ls("some/archive/dir", True) + + @patch("teuthology.ls.open") + @patch("os.popen") + @patch("os.path.isdir") + @patch("os.path.isfile") + def test_print_debug_info(self, m_isfile, m_isdir, m_popen, m_open): + m_isfile.return_value = True + m_isdir.return_value = True + m_popen.return_value = Mock() + cmdline = Mock() + cmdline.find = Mock(return_value=0) + m1 = Mock() + m2 = Mock() + m2.read = Mock(return_value=cmdline) + m_open.side_effect = [m1, m2] + ls.print_debug_info("the_job", "job/dir", "some/archive/dir") diff --git a/teuthology/test/test_misc.py b/teuthology/test/test_misc.py new file mode 100644 index 0000000000..97b499f7cb --- /dev/null +++ b/teuthology/test/test_misc.py @@ -0,0 +1,388 @@ +import argparse + +from unittest.mock import Mock, patch +from teuthology.orchestra import cluster +from teuthology.config import config +from teuthology import misc +import subprocess + +import pytest + + +class FakeRemote(object): + pass + + +def test_sh_normal(caplog): + assert misc.sh("/bin/echo ABC") == "ABC\n" + assert "truncated" not in caplog.text + + +def test_sh_truncate(caplog): + assert misc.sh("/bin/echo -n AB ; /bin/echo C", 2) == "ABC\n" + assert "truncated" in caplog.text + assert "ABC" not in caplog.text + + +def test_sh_fail(caplog): + with pytest.raises(subprocess.CalledProcessError) as excinfo: + misc.sh("/bin/echo -n AB ; /bin/echo C ; exit 111", 2) == "ABC\n" + assert excinfo.value.returncode == 111 + for record in caplog.records: + if record.levelname == 'ERROR': + assert ('replay full' in record.message or + 'ABC\n' == record.message) + +def test_sh_progress(caplog): + misc.sh("echo AB ; sleep 5 ; /bin/echo C", 2) == "ABC\n" + records = caplog.records + assert ':sh: ' in records[0].message + assert 'AB' == records[1].message + assert 'C' == records[2].message + # + # With a sleep 5 between the first and the second message, + # there must be at least 2 seconds between the log record + # of the first message and the log record of the second one + # + assert (records[2].created - records[1].created) > 2 + + +def test_wait_until_osds_up(): + ctx = argparse.Namespace() + ctx.daemons = Mock() + ctx.daemons.iter_daemons_of_role.return_value = list() + remote = FakeRemote() + + def s(self, **kwargs): + return 'IGNORED\n{"osds":[{"state":["up"]}]}' + + remote.sh = s + ctx.cluster = cluster.Cluster( + remotes=[ + (remote, ['osd.0', 'client.1']) + ], + ) + with patch.multiple( + misc, + get_testdir=lambda ctx: "TESTDIR", + ): + misc.wait_until_osds_up(ctx, ctx.cluster, remote) + + +def test_get_clients_simple(): + ctx = argparse.Namespace() + remote = FakeRemote() + ctx.cluster = cluster.Cluster( + remotes=[ + (remote, ['client.0', 
'client.1']) + ], + ) + g = misc.get_clients(ctx=ctx, roles=['client.1']) + got = next(g) + assert len(got) == 2 + assert got[0] == ('1') + assert got[1] is remote + with pytest.raises(StopIteration): + next(g) + + +def test_get_mon_names(): + expected = [ + ([['mon.a', 'osd.0', 'mon.c']], 'ceph', ['mon.a', 'mon.c']), + ([['ceph.mon.a', 'osd.0', 'ceph.mon.c']], 'ceph', ['ceph.mon.a', 'ceph.mon.c']), + ([['mon.a', 'osd.0', 'mon.c'], ['ceph.mon.b']], 'ceph', ['mon.a', 'mon.c', 'ceph.mon.b']), + ([['mon.a', 'osd.0', 'mon.c'], ['foo.mon.a']], 'ceph', ['mon.a', 'mon.c']), + ([['mon.a', 'osd.0', 'mon.c'], ['foo.mon.a']], 'foo', ['foo.mon.a']), + ] + for remote_roles, cluster_name, expected_mons in expected: + ctx = argparse.Namespace() + ctx.cluster = Mock() + ctx.cluster.remotes = {i: roles for i, roles in enumerate(remote_roles)} + mons = misc.get_mon_names(ctx, cluster_name) + assert expected_mons == mons + + +def test_get_first_mon(): + expected = [ + ([['mon.a', 'osd.0', 'mon.c']], 'ceph', 'mon.a'), + ([['ceph.mon.a', 'osd.0', 'ceph.mon.c']], 'ceph', 'ceph.mon.a'), + ([['mon.a', 'osd.0', 'mon.c'], ['ceph.mon.b']], 'ceph', 'ceph.mon.b'), + ([['mon.a', 'osd.0', 'mon.c'], ['foo.mon.a']], 'ceph', 'mon.a'), + ([['foo.mon.b', 'osd.0', 'mon.c'], ['foo.mon.a']], 'foo', 'foo.mon.a'), + ] + for remote_roles, cluster_name, expected_mon in expected: + ctx = argparse.Namespace() + ctx.cluster = Mock() + ctx.cluster.remotes = {i: roles for i, roles in enumerate(remote_roles)} + mon = misc.get_first_mon(ctx, None, cluster_name) + assert expected_mon == mon + + +def test_roles_of_type(): + expected = [ + (['client.0', 'osd.0', 'ceph.osd.1'], 'osd', ['0', '1']), + (['client.0', 'osd.0', 'ceph.osd.1'], 'client', ['0']), + (['foo.client.1', 'bar.client.2.3', 'baz.osd.1'], 'mon', []), + (['foo.client.1', 'bar.client.2.3', 'baz.osd.1'], 'client', + ['1', '2.3']), + ] + for roles_for_host, type_, expected_ids in expected: + ids = list(misc.roles_of_type(roles_for_host, type_)) + assert ids == expected_ids + + +def test_cluster_roles_of_type(): + expected = [ + (['client.0', 'osd.0', 'ceph.osd.1'], 'osd', 'ceph', + ['osd.0', 'ceph.osd.1']), + (['client.0', 'osd.0', 'ceph.osd.1'], 'client', 'ceph', + ['client.0']), + (['foo.client.1', 'bar.client.2.3', 'baz.osd.1'], 'mon', None, []), + (['foo.client.1', 'bar.client.2.3', 'baz.osd.1'], 'client', None, + ['foo.client.1', 'bar.client.2.3']), + (['foo.client.1', 'bar.client.2.3', 'baz.osd.1'], 'client', 'bar', + ['bar.client.2.3']), + ] + for roles_for_host, type_, cluster_, expected_roles in expected: + roles = list(misc.cluster_roles_of_type(roles_for_host, type_, cluster_)) + assert roles == expected_roles + + +def test_all_roles_of_type(): + expected = [ + ([['client.0', 'osd.0', 'ceph.osd.1'], ['bar.osd.2']], + 'osd', ['0', '1', '2']), + ([['client.0', 'osd.0', 'ceph.osd.1'], ['bar.osd.2', 'baz.client.1']], + 'client', ['0', '1']), + ([['foo.client.1', 'bar.client.2.3'], ['baz.osd.1']], 'mon', []), + ([['foo.client.1', 'bar.client.2.3'], ['baz.osd.1', 'ceph.client.bar']], + 'client', ['1', '2.3', 'bar']), + ] + for host_roles, type_, expected_ids in expected: + cluster_ = Mock() + cluster_.remotes = dict(enumerate(host_roles)) + ids = list(misc.all_roles_of_type(cluster_, type_)) + assert ids == expected_ids + + +def test_get_http_log_path(): + # Fake configuration + archive_server = "http://example.com/server_root" + config.archive_server = archive_server + archive_dir = "/var/www/archives" + + path = misc.get_http_log_path(archive_dir) + assert path == 
"http://example.com/server_root/archives/" + + job_id = '12345' + path = misc.get_http_log_path(archive_dir, job_id) + assert path == "http://example.com/server_root/archives/12345/" + + # Inktank configuration + archive_server = "http://qa-proxy.ceph.com/teuthology/" + config.archive_server = archive_server + archive_dir = "/var/lib/teuthworker/archive/teuthology-2013-09-12_11:49:50-ceph-deploy-main-testing-basic-vps" + job_id = 31087 + path = misc.get_http_log_path(archive_dir, job_id) + assert path == "http://qa-proxy.ceph.com/teuthology/teuthology-2013-09-12_11:49:50-ceph-deploy-main-testing-basic-vps/31087/" + + path = misc.get_http_log_path(archive_dir) + assert path == "http://qa-proxy.ceph.com/teuthology/teuthology-2013-09-12_11:49:50-ceph-deploy-main-testing-basic-vps/" + + +def test_is_type(): + is_client = misc.is_type('client') + assert is_client('client.0') + assert is_client('ceph.client.0') + assert is_client('foo.client.0') + assert is_client('foo.client.bar.baz') + + with pytest.raises(ValueError): + is_client('') + is_client('client') + assert not is_client('foo.bar.baz') + assert not is_client('ceph.client') + assert not is_client('hadoop.main.0') + + +def test_is_type_in_cluster(): + is_c1_osd = misc.is_type('osd', 'c1') + with pytest.raises(ValueError): + is_c1_osd('') + assert not is_c1_osd('osd.0') + assert not is_c1_osd('ceph.osd.0') + assert not is_c1_osd('ceph.osd.0') + assert not is_c1_osd('c11.osd.0') + assert is_c1_osd('c1.osd.0') + assert is_c1_osd('c1.osd.999') + + +def test_get_mons(): + ips = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] + addrs = ['1.1.1.1:6789', '1.1.1.1:6790', '1.1.1.1:6791'] + + mons = misc.get_mons([['mon.a']], ips) + assert mons == {'mon.a': addrs[0]} + + mons = misc.get_mons([['cluster-a.mon.foo', 'client.b'], ['osd.0']], ips) + assert mons == {'cluster-a.mon.foo': addrs[0]} + + mons = misc.get_mons([['mon.a', 'mon.b', 'ceph.mon.c']], ips) + assert mons == {'mon.a': addrs[0], + 'mon.b': addrs[1], + 'ceph.mon.c': addrs[2]} + + mons = misc.get_mons([['mon.a'], ['mon.b'], ['ceph.mon.c']], ips) + assert mons == {'mon.a': addrs[0], + 'mon.b': ips[1] + ':6789', + 'ceph.mon.c': ips[2] + ':6789'} + + +def test_split_role(): + expected = { + 'client.0': ('ceph', 'client', '0'), + 'foo.client.0': ('foo', 'client', '0'), + 'bar.baz.x.y.z': ('bar', 'baz', 'x.y.z'), + 'mds.a-s-b': ('ceph', 'mds', 'a-s-b'), + } + + for role, expected_split in expected.items(): + actual_split = misc.split_role(role) + assert actual_split == expected_split + +class TestHostnames(object): + def setup(self): + config._conf = dict() + + def teardown(self): + config.load() + + def test_canonicalize_hostname(self): + host_base = 'box1' + result = misc.canonicalize_hostname(host_base) + assert result == 'ubuntu@box1.front.sepia.ceph.com' + + def test_decanonicalize_hostname(self): + host = 'ubuntu@box1.front.sepia.ceph.com' + result = misc.decanonicalize_hostname(host) + assert result == 'box1' + + def test_canonicalize_hostname_nouser(self): + host_base = 'box1' + result = misc.canonicalize_hostname(host_base, user=None) + assert result == 'box1.front.sepia.ceph.com' + + def test_decanonicalize_hostname_nouser(self): + host = 'box1.front.sepia.ceph.com' + result = misc.decanonicalize_hostname(host) + assert result == 'box1' + + def test_canonicalize_hostname_otherlab(self): + config.lab_domain = 'example.com' + host_base = 'box1' + result = misc.canonicalize_hostname(host_base) + assert result == 'ubuntu@box1.example.com' + + def test_decanonicalize_hostname_otherlab(self): + 
config.lab_domain = 'example.com' + host = 'ubuntu@box1.example.com' + result = misc.decanonicalize_hostname(host) + assert result == 'box1' + + def test_canonicalize_hostname_nodomain(self): + config.lab_domain = '' + host = 'box2' + result = misc.canonicalize_hostname(host) + assert result == 'ubuntu@' + host + + def test_decanonicalize_hostname_nodomain(self): + config.lab_domain = '' + host = 'ubuntu@box2' + result = misc.decanonicalize_hostname(host) + assert result == 'box2' + + def test_canonicalize_hostname_full_other_user(self): + config.lab_domain = 'example.com' + host = 'user1@box1.example.come' + result = misc.canonicalize_hostname(host) + assert result == 'user1@box1.example.com' + + def test_decanonicalize_hostname_full_other_user(self): + config.lab_domain = 'example.com' + host = 'user1@box1.example.come' + result = misc.decanonicalize_hostname(host) + assert result == 'box1' + +class TestMergeConfigs(object): + """ Tests merge_config and deep_merge in teuthology.misc """ + + @patch("os.path.exists") + @patch("yaml.safe_load") + @patch("teuthology.misc.open") + def test_merge_configs(self, m_open, m_safe_load, m_exists): + """ Only tests with one yaml file being passed, mainly just to test + the loop logic. The actual merge will be tested in subsequent + tests. + """ + expected = {"a": "b", "b": "c"} + m_exists.return_value = True + m_safe_load.return_value = expected + result = misc.merge_configs(["path/to/config1"]) + assert result == expected + m_open.assert_called_once_with("path/to/config1") + + def test_merge_configs_empty(self): + assert misc.merge_configs([]) == {} + + def test_deep_merge(self): + a = {"a": "b"} + b = {"b": "c"} + result = misc.deep_merge(a, b) + assert result == {"a": "b", "b": "c"} + + def test_overwrite_deep_merge(self): + a = {"a": "b"} + b = {"a": "overwritten", "b": "c"} + result = misc.deep_merge(a, b) + assert result == {"a": "overwritten", "b": "c"} + + def test_list_deep_merge(self): + a = [1, 2] + b = [3, 4] + result = misc.deep_merge(a, b) + assert result == [1, 2, 3, 4] + + def test_missing_list_deep_merge(self): + a = [1, 2] + b = "not a list" + with pytest.raises(AssertionError): + misc.deep_merge(a, b) + + def test_missing_a_deep_merge(self): + result = misc.deep_merge(None, [1, 2]) + assert result == [1, 2] + + def test_missing_b_deep_merge(self): + result = misc.deep_merge([1, 2], None) + assert result == [1, 2] + + def test_invalid_b_deep_merge(self): + with pytest.raises(AssertionError): + misc.deep_merge({"a": "b"}, "invalid") + + +class TestIsInDict(object): + def test_simple_membership(self): + assert misc.is_in_dict('a', 'foo', {'a':'foo', 'b':'bar'}) + + def test_dict_membership(self): + assert misc.is_in_dict( + 'a', {'sub1':'key1', 'sub2':'key2'}, + {'a':{'sub1':'key1', 'sub2':'key2', 'sub3':'key3'}} + ) + + def test_simple_nonmembership(self): + assert not misc.is_in_dict('a', 'foo', {'a':'bar', 'b':'foo'}) + + def test_nonmembership_with_presence_at_lower_level(self): + assert not misc.is_in_dict('a', 'foo', {'a':{'a': 'foo'}}) diff --git a/teuthology/test/test_nuke.py b/teuthology/test/test_nuke.py new file mode 100644 index 0000000000..6c02dee40a --- /dev/null +++ b/teuthology/test/test_nuke.py @@ -0,0 +1,293 @@ +import datetime +import json +import os +import pytest +import subprocess + +from unittest.mock import patch, Mock, DEFAULT, ANY + +from teuthology import nuke +from teuthology import misc +from teuthology.config import config +from teuthology.dispatcher.supervisor import create_fake_context + +class 
TestNuke(object): + + #@pytest.mark.skipif('OS_AUTH_URL' not in os.environ, + # reason="no OS_AUTH_URL environment variable") + def test_stale_openstack_volumes(self): + ctx = Mock() + ctx.teuthology_config = config + ctx.dry_run = False + now = datetime.datetime.strftime(datetime.datetime.now(), + "%Y-%m-%dT%H:%M:%S.000000") + id = '4bee3af9-febb-40c1-a17e-ff63edb415c5' + name = 'target1-0' + volume_list = json.loads( + '[{' + ' "ID": "' + id + '"' + '}]' + ) + # + # A volume created a second ago is left untouched + # + volume_show = ( + '{"id": "' + id + '", ' + '"created_at": "' + now + '", ' + '"display_name": "' + name + '"}' + ) + + with patch('teuthology.nuke.openstack_delete_volume') as m_os_del_vol: + with patch.object(nuke.OpenStack, 'run') as m_os_run: + m_os_run.return_value = volume_show + nuke.stale_openstack_volumes(ctx, volume_list) + m_os_del_vol.assert_not_called() + + + # + # A volume created long ago is destroyed + # + ancient = "2000-11-02T15:43:12.000000" + volume_show = ( + '{"id": "' + id + '", ' + '"created_at": "' + ancient + '", ' + '"display_name": "' + name + '"}' + ) + + with patch('teuthology.nuke.openstack_delete_volume') as m_os_del_vol: + with patch.object(nuke.OpenStack, 'run') as m_os_run: + m_os_run.return_value = volume_show + nuke.stale_openstack_volumes(ctx, volume_list) + m_os_del_vol.assert_called_with(id) + + # + # A volume that no longer exists is ignored + # + with patch('teuthology.nuke.openstack_delete_volume') as m_os_del_vol: + with patch.object(nuke.OpenStack, 'run') as m_os_run: + m_os_run.side_effect = subprocess.CalledProcessError('ERROR', 'FAIL') + nuke.stale_openstack_volumes(ctx, volume_list) + m_os_del_vol.assert_not_called() + + def test_stale_openstack_nodes(self): + ctx = Mock() + ctx.teuthology_config = config + ctx.dry_run = False + name = 'target1' + uuid = 'UUID1' + now = datetime.datetime.strftime(datetime.datetime.now(), + "%Y-%m-%d %H:%M:%S.%f") + # + # A node that is not of type openstack is left untouched + # + with patch.multiple( + nuke, + unlock_one=DEFAULT, + ) as m: + nuke.stale_openstack_nodes(ctx, { + }, { + name: { 'locked_since': now, + 'machine_type': 'mira', }, + }) + m['unlock_one'].assert_not_called() + # + # A node that was just locked and does not have + # an instance yet is left untouched + # + with patch.multiple( + nuke, + unlock_one=DEFAULT, + ) as m: + nuke.stale_openstack_nodes(ctx, { + }, { + name: { 'locked_since': now, + 'machine_type': 'openstack', }, + }) + m['unlock_one'].assert_not_called() + # + # A node that has been locked for some time and + # has no instance is unlocked.
+ # + ancient = "2000-11-02 15:43:12.000000" + me = 'loic@dachary.org' + with patch.multiple( + nuke, + unlock_one=DEFAULT, + ) as m: + nuke.stale_openstack_nodes(ctx, { + }, { + name: { 'locked_since': ancient, + 'locked_by': me, + 'machine_type': 'openstack', }, + }) + m['unlock_one'].assert_called_with( + ctx, name, me) + # + # A node that has been locked for some time and + # has an instance is left untouched + # + with patch.multiple( + nuke, + unlock_one=DEFAULT, + ) as m: + nuke.stale_openstack_nodes(ctx, { + uuid: { + 'ID': uuid, + 'Name': name, + }, + }, { + name: { 'locked_since': ancient, + 'machine_type': 'openstack', }, + }) + m['unlock_one'].assert_not_called() + + def test_stale_openstack_instances(self): + if 'OS_AUTH_URL' not in os.environ: + pytest.skip('no OS_AUTH_URL environment variable') + ctx = Mock() + ctx.teuthology_config = config + ctx.dry_run = False + name = 'target1' + uuid = 'UUID1' + # + # An instance created a second ago is left untouched, + # even when it is not locked. + # + with patch.multiple( + nuke.OpenStackInstance, + exists=lambda _: True, + get_created=lambda _: 1, + __getitem__=lambda _, key: name, + destroy=DEFAULT, + ) as m: + nuke.stale_openstack_instances(ctx, { + uuid: { 'Name': name, }, + }, { + }) + m['destroy'].assert_not_called() + # + # An instance created a very long time ago is destroyed + # + with patch.multiple( + nuke.OpenStackInstance, + exists=lambda _: True, + get_created=lambda _: 1000000000, + __getitem__=lambda _, key: name, + destroy=DEFAULT, + ) as m: + nuke.stale_openstack_instances(ctx, { + uuid: { 'Name': name, }, + }, { + misc.canonicalize_hostname(name, user=None): {}, + }) + m['destroy'].assert_called_with() + # + # An instance that turns out to not exist any longer + # is ignored. + # + with patch.multiple( + nuke.OpenStackInstance, + exists=lambda _: False, + __getitem__=lambda _, key: name, + destroy=DEFAULT, + ) as m: + nuke.stale_openstack_instances(ctx, { + uuid: { 'Name': name, }, + }, { + misc.canonicalize_hostname(name, user=None): {}, + }) + m['destroy'].assert_not_called() + # + # An instance created but not locked after a while is + # destroyed. + # + with patch.multiple( + nuke.OpenStackInstance, + exists=lambda _: True, + get_created=lambda _: nuke.OPENSTACK_DELAY + 1, + __getitem__=lambda _, key: name, + destroy=DEFAULT, + ) as m: + nuke.stale_openstack_instances(ctx, { + uuid: { 'Name': name, }, + }, { + }) + m['destroy'].assert_called_with() + # + # An instance created within the expected lifetime + # of a job and locked is left untouched. 
+ # + with patch.multiple( + nuke.OpenStackInstance, + exists=lambda _: True, + get_created=lambda _: nuke.OPENSTACK_DELAY + 1, + __getitem__=lambda _, key: name, + destroy=DEFAULT, + ) as m: + nuke.stale_openstack_instances(ctx, { + uuid: { 'Name': name, }, + }, { + misc.canonicalize_hostname(name, user=None): {}, + }) + m['destroy'].assert_not_called() + +def test_nuke_internal(): + job_config = dict( + owner='test_owner', + targets={'user@host1': 'key1', 'user@host2': 'key2'}, + archive_path='/path/to/test_run', + machine_type='test_machine', + os_type='centos', + os_version='8.3', + name='test_name', + ) + statuses = { + target: {'name': target, 'description': job_config['name']} + for target in job_config['targets'].keys() + } + ctx = create_fake_context(job_config) + + # minimal call using defaults + with patch.multiple( + nuke, + nuke_helper=DEFAULT, + unlock_one=DEFAULT, + get_status=lambda i: statuses[i], + ) as m: + nuke.nuke(ctx, True) + m['nuke_helper'].assert_called_with(ANY, True, False, True) + m['unlock_one'].assert_called() + + # don't unlock + with patch.multiple( + nuke, + nuke_helper=DEFAULT, + unlock_one=DEFAULT, + get_status=lambda i: statuses[i], + ) as m: + nuke.nuke(ctx, False) + m['nuke_helper'].assert_called_with(ANY, False, False, True) + m['unlock_one'].assert_not_called() + + # mimicing what teuthology-dispatcher --supervisor does + with patch.multiple( + nuke, + nuke_helper=DEFAULT, + unlock_one=DEFAULT, + get_status=lambda i: statuses[i], + ) as m: + nuke.nuke(ctx, False, True, False, True, False) + m['nuke_helper'].assert_called_with(ANY, False, True, False) + m['unlock_one'].assert_not_called() + + # no targets + del ctx.config['targets'] + with patch.multiple( + nuke, + nuke_helper=DEFAULT, + unlock_one=DEFAULT, + get_status=lambda i: statuses[i], + ) as m: + nuke.nuke(ctx, True) + m['nuke_helper'].assert_not_called() + m['unlock_one'].assert_not_called() diff --git a/teuthology/test/test_packaging.py b/teuthology/test/test_packaging.py new file mode 100644 index 0000000000..e497bb6907 --- /dev/null +++ b/teuthology/test/test_packaging.py @@ -0,0 +1,794 @@ +import pytest + +from unittest.mock import patch, Mock + +from teuthology import packaging +from teuthology.exceptions import VersionNotFoundError + +KOJI_TASK_RPMS_MATRIX = [ + ('tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel'), + ('tasks/6745/9666745/kernel-modules-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-modules'), + ('tasks/6745/9666745/kernel-tools-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-tools'), + ('tasks/6745/9666745/kernel-tools-libs-devel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-tools-libs-devel'), + ('tasks/6745/9666745/kernel-headers-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-headers'), + ('tasks/6745/9666745/kernel-tools-debuginfo-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-tools-debuginfo'), + ('tasks/6745/9666745/kernel-debuginfo-common-x86_64-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-debuginfo-common-x86_64'), + ('tasks/6745/9666745/perf-debuginfo-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'perf-debuginfo'), + ('tasks/6745/9666745/kernel-modules-extra-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-modules-extra'), + ('tasks/6745/9666745/kernel-tools-libs-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-tools-libs'), + ('tasks/6745/9666745/kernel-core-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-core'), + ('tasks/6745/9666745/kernel-debuginfo-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-debuginfo'), + 
('tasks/6745/9666745/python-perf-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'python-perf'), + ('tasks/6745/9666745/kernel-devel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'kernel-devel'), + ('tasks/6745/9666745/python-perf-debuginfo-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'python-perf-debuginfo'), + ('tasks/6745/9666745/perf-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', 'perf'), +] + +KOJI_TASK_RPMS = [rpm[0] for rpm in KOJI_TASK_RPMS_MATRIX] + + +class TestPackaging(object): + + def test_get_package_name_deb(self): + remote = Mock() + remote.os.package_type = "deb" + assert packaging.get_package_name('sqlite', remote) == "sqlite3" + + def test_get_package_name_rpm(self): + remote = Mock() + remote.os.package_type = "rpm" + assert packaging.get_package_name('sqlite', remote) is None + + def test_get_package_name_not_found(self): + remote = Mock() + remote.os.package_type = "rpm" + assert packaging.get_package_name('notthere', remote) is None + + def test_get_service_name_deb(self): + remote = Mock() + remote.os.package_type = "deb" + assert packaging.get_service_name('httpd', remote) == 'apache2' + + def test_get_service_name_rpm(self): + remote = Mock() + remote.os.package_type = "rpm" + assert packaging.get_service_name('httpd', remote) == 'httpd' + + def test_get_service_name_not_found(self): + remote = Mock() + remote.os.package_type = "rpm" + assert packaging.get_service_name('notthere', remote) is None + + def test_install_package_deb(self): + m_remote = Mock() + m_remote.os.package_type = "deb" + expected = [ + 'DEBIAN_FRONTEND=noninteractive', + 'sudo', + '-E', + 'apt-get', + '-y', + '--force-yes', + 'install', + 'apache2' + ] + packaging.install_package('apache2', m_remote) + m_remote.run.assert_called_with(args=expected) + + def test_install_package_rpm(self): + m_remote = Mock() + m_remote.os.package_type = "rpm" + expected = [ + 'sudo', + 'yum', + '-y', + 'install', + 'httpd' + ] + packaging.install_package('httpd', m_remote) + m_remote.run.assert_called_with(args=expected) + + def test_remove_package_deb(self): + m_remote = Mock() + m_remote.os.package_type = "deb" + expected = [ + 'DEBIAN_FRONTEND=noninteractive', + 'sudo', + '-E', + 'apt-get', + '-y', + 'purge', + 'apache2' + ] + packaging.remove_package('apache2', m_remote) + m_remote.run.assert_called_with(args=expected) + + def test_remove_package_rpm(self): + m_remote = Mock() + m_remote.os.package_type = "rpm" + expected = [ + 'sudo', + 'yum', + '-y', + 'erase', + 'httpd' + ] + packaging.remove_package('httpd', m_remote) + m_remote.run.assert_called_with(args=expected) + + def test_get_koji_package_name(self): + build_info = dict(version="3.10.0", release="123.20.1") + result = packaging.get_koji_package_name("kernel", build_info) + assert result == "kernel-3.10.0-123.20.1.x86_64.rpm" + + @patch("teuthology.packaging.config") + def test_get_kojiroot_base_url(self, m_config): + m_config.kojiroot_url = "http://kojiroot.com" + build_info = dict( + package_name="kernel", + version="3.10.0", + release="123.20.1", + ) + result = packaging.get_kojiroot_base_url(build_info) + expected = "http://kojiroot.com/kernel/3.10.0/123.20.1/x86_64/" + assert result == expected + + @patch("teuthology.packaging.config") + def test_get_koji_build_info_success(self, m_config): + m_config.kojihub_url = "http://kojihub.com" + m_proc = Mock() + expected = dict(foo="bar") + m_proc.exitstatus = 0 + m_proc.stdout.getvalue.return_value = str(expected) + m_remote = Mock() + m_remote.run.return_value = m_proc + result = packaging.get_koji_build_info(1, m_remote, dict()) + 
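test_get_koji_build_info_success feeds str(expected) back through the mocked remote's stdout; the equality assertion that follows relies on the printed dict being recoverable as a Python object. One way to do that round-trip is ast.literal_eval (whether packaging parses it exactly this way is an assumption of this sketch):

    import ast

    build_info = dict(foo='bar')
    printed = str(build_info)          # "{'foo': 'bar'}", as returned by the mocked stdout
    assert ast.literal_eval(printed) == build_info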
assert result == expected + args, kwargs = m_remote.run.call_args + expected_args = [ + 'python', '-c', + 'import koji; ' + 'hub = koji.ClientSession("http://kojihub.com"); ' + 'print(hub.getBuild(1))', + ] + assert expected_args == kwargs['args'] + + @patch("teuthology.packaging.config") + def test_get_koji_build_info_fail(self, m_config): + m_config.kojihub_url = "http://kojihub.com" + m_proc = Mock() + m_proc.exitstatus = 1 + m_remote = Mock() + m_remote.run.return_value = m_proc + m_ctx = Mock() + m_ctx.summary = dict() + with pytest.raises(RuntimeError): + packaging.get_koji_build_info(1, m_remote, m_ctx) + + @patch("teuthology.packaging.config") + def test_get_koji_task_result_success(self, m_config): + m_config.kojihub_url = "http://kojihub.com" + m_proc = Mock() + expected = dict(foo="bar") + m_proc.exitstatus = 0 + m_proc.stdout.getvalue.return_value = str(expected) + m_remote = Mock() + m_remote.run.return_value = m_proc + result = packaging.get_koji_task_result(1, m_remote, dict()) + assert result == expected + args, kwargs = m_remote.run.call_args + expected_args = [ + 'python', '-c', + 'import koji; ' + 'hub = koji.ClientSession("http://kojihub.com"); ' + 'print(hub.getTaskResult(1))', + ] + assert expected_args == kwargs['args'] + + @patch("teuthology.packaging.config") + def test_get_koji_task_result_fail(self, m_config): + m_config.kojihub_url = "http://kojihub.com" + m_proc = Mock() + m_proc.exitstatus = 1 + m_remote = Mock() + m_remote.run.return_value = m_proc + m_ctx = Mock() + m_ctx.summary = dict() + with pytest.raises(RuntimeError): + packaging.get_koji_task_result(1, m_remote, m_ctx) + + @patch("teuthology.packaging.config") + def test_get_koji_task_rpm_info_success(self, m_config): + m_config.koji_task_url = "http://kojihub.com/work" + expected = dict( + base_url="http://kojihub.com/work/tasks/6745/9666745/", + version="4.1.0-0.rc2.git2.1.fc23.x86_64", + rpm_name="kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm", + package_name="kernel", + ) + result = packaging.get_koji_task_rpm_info('kernel', KOJI_TASK_RPMS) + assert expected == result + + @patch("teuthology.packaging.config") + def test_get_koji_task_rpm_info_fail(self, m_config): + m_config.koji_task_url = "http://kojihub.com/work" + with pytest.raises(RuntimeError): + packaging.get_koji_task_rpm_info('ceph', KOJI_TASK_RPMS) + + def test_get_package_version_deb_found(self): + remote = Mock() + remote.os.package_type = "deb" + proc = Mock() + proc.exitstatus = 0 + proc.stdout.getvalue.return_value = "2.2" + remote.run.return_value = proc + result = packaging.get_package_version(remote, "apache2") + assert result == "2.2" + + def test_get_package_version_deb_command(self): + remote = Mock() + remote.os.package_type = "deb" + packaging.get_package_version(remote, "apache2") + args, kwargs = remote.run.call_args + expected_args = ['dpkg-query', '-W', '-f', '${Version}', 'apache2'] + assert expected_args == kwargs['args'] + + def test_get_package_version_rpm_found(self): + remote = Mock() + remote.os.package_type = "rpm" + proc = Mock() + proc.exitstatus = 0 + proc.stdout.getvalue.return_value = "2.2" + remote.run.return_value = proc + result = packaging.get_package_version(remote, "httpd") + assert result == "2.2" + + def test_get_package_version_rpm_command(self): + remote = Mock() + remote.os.package_type = "rpm" + packaging.get_package_version(remote, "httpd") + args, kwargs = remote.run.call_args + expected_args = ['rpm', '-q', 'httpd', '--qf', '%{VERSION}-%{RELEASE}'] + assert expected_args == kwargs['args'] + + def 
test_get_package_version_not_found(self): + remote = Mock() + remote.os.package_type = "rpm" + proc = Mock() + proc.exitstatus = 1 + proc.stdout.getvalue.return_value = "not installed" + remote.run.return_value = proc + result = packaging.get_package_version(remote, "httpd") + assert result is None + + def test_get_package_version_invalid_version(self): + # this tests the possibility that the package is not found + # but the exitstatus is still 0. Not entirely sure we'll ever + # hit this condition, but I want to test the codepath regardless + remote = Mock() + remote.os.package_type = "rpm" + proc = Mock() + proc.exitstatus = 0 + proc.stdout.getvalue.return_value = "not installed" + remote.run.return_value = proc + result = packaging.get_package_version(remote, "httpd") + assert result is None + + @pytest.mark.parametrize("input, expected", KOJI_TASK_RPMS_MATRIX) + def test_get_koji_task_result_package_name(self, input, expected): + assert packaging._get_koji_task_result_package_name(input) == expected + + @patch("requests.get") + def test_get_response_success(self, m_get): + resp = Mock() + resp.ok = True + m_get.return_value = resp + result = packaging._get_response("google.com") + assert result == resp + + @patch("requests.get") + def test_get_response_failed_wait(self, m_get): + resp = Mock() + resp.ok = False + m_get.return_value = resp + packaging._get_response("google.com", wait=True, sleep=1, tries=2) + assert m_get.call_count == 2 + + @patch("requests.get") + def test_get_response_failed_no_wait(self, m_get): + resp = Mock() + resp.ok = False + m_get.return_value = resp + packaging._get_response("google.com", sleep=1, tries=2) + assert m_get.call_count == 1 + + +class TestBuilderProject(object): + klass = None + + def setup(self): + if self.klass is None: + pytest.skip() + + def _get_remote(self, arch="x86_64", system_type="deb", distro="ubuntu", + codename="focal", version="20.04"): + rem = Mock() + rem.system_type = system_type + rem.os.name = distro + rem.os.codename = codename + rem.os.version = version + rem.arch = arch + + return rem + + def test_init_from_remote_base_url(self, expected=None): + assert expected is not None + rem = self._get_remote() + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + result = gp.base_url + assert result == expected + + def test_init_from_remote_base_url_debian(self, expected=None): + assert expected is not None + # remote.os.codename returns and empty string on debian + rem = self._get_remote(distro="debian", codename='', version="7.1") + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + result = gp.base_url + assert result == expected + + def test_init_from_config_base_url(self, expected=None): + assert expected is not None + config = dict( + os_type="ubuntu", + os_version="20.04", + sha1="sha1", + ) + gp = self.klass("ceph", config) + result = gp.base_url + print(self.m_get.call_args_list) + assert result == expected + + def test_init_from_config_branch_ref(self): + config = dict( + os_type="ubuntu", + os_version="20.04", + branch='jewel', + ) + gp = self.klass("ceph", config) + result = gp.uri_reference + expected = 'ref/jewel' + assert result == expected + + def test_init_from_config_tag_ref(self): + config = dict( + os_type="ubuntu", + os_version="20.04", + tag='v10.0.1', + ) + gp = self.klass("ceph", config) + result = gp.uri_reference + expected = 'ref/v10.0.1' + assert result == expected + + def test_init_from_config_tag_overrides_branch_ref(self, caplog): + config = dict( + 
os_type="ubuntu", + os_version="20.04", + branch='jewel', + tag='v10.0.1', + ) + gp = self.klass("ceph", config) + result = gp.uri_reference + expected = 'ref/v10.0.1' + assert result == expected + expected_log = 'More than one of ref, tag, branch, or sha1 supplied; using tag' + assert expected_log in caplog.text + return gp + + def test_init_from_config_branch_overrides_sha1(self, caplog): + config = dict( + os_type="ubuntu", + os_version="20.04", + branch='jewel', + sha1='sha1', + ) + gp = self.klass("ceph", config) + result = gp.uri_reference + expected = 'ref/jewel' + assert result == expected + expected_log = 'More than one of ref, tag, branch, or sha1 supplied; using branch' + assert expected_log in caplog.text + return gp + + REFERENCE_MATRIX = [ + ('the_ref', 'the_tag', 'the_branch', 'the_sha1', dict(ref='the_ref')), + (None, 'the_tag', 'the_branch', 'the_sha1', dict(tag='the_tag')), + (None, None, 'the_branch', 'the_sha1', dict(branch='the_branch')), + (None, None, None, 'the_sha1', dict(sha1='the_sha1')), + (None, None, 'the_branch', None, dict(branch='the_branch')), + ] + + @pytest.mark.parametrize( + "ref, tag, branch, sha1, expected", + REFERENCE_MATRIX, + ) + def test_choose_reference(self, ref, tag, branch, sha1, expected): + config = dict( + os_type='ubuntu', + os_version='18.04', + ) + if ref: + config['ref'] = ref + if tag: + config['tag'] = tag + if branch: + config['branch'] = branch + if sha1: + config['sha1'] = sha1 + gp = self.klass("ceph", config) + assert gp._choose_reference() == expected + + def test_get_package_version_found(self): + rem = self._get_remote() + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + assert gp.version == "0.90.0" + + @patch("teuthology.packaging._get_response") + def test_get_package_version_not_found(self, m_get_response): + rem = self._get_remote() + ctx = dict(foo="bar") + resp = Mock() + resp.ok = False + m_get_response.return_value = resp + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + with pytest.raises(VersionNotFoundError): + gp.version + + def test_get_package_sha1_fetched_found(self): + rem = self._get_remote() + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + assert gp.sha1 == "the_sha1" + + def test_get_package_sha1_fetched_not_found(self): + rem = self._get_remote() + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + assert not gp.sha1 + + DISTRO_MATRIX = [None] * 12 + + @pytest.mark.parametrize( + "matrix_index", + range(len(DISTRO_MATRIX)), + ) + def test_get_distro_remote(self, matrix_index): + (distro, version, codename, expected) = \ + self.DISTRO_MATRIX[matrix_index] + rem = self._get_remote(distro=distro, version=version, + codename=codename) + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + assert gp.distro == expected + + DISTRO_MATRIX_NOVER = [ + ('rhel', None, None, 'centos8'), + ('centos', None, None, 'centos8'), + ('fedora', None, None, 'fedora25'), + ('ubuntu', None, None, 'focal'), + ('debian', None, None, 'jessie'), + ] + + @pytest.mark.parametrize( + "matrix_index", + range(len(DISTRO_MATRIX) + len(DISTRO_MATRIX_NOVER)), + ) + def test_get_distro_config(self, matrix_index): + (distro, version, codename, expected) = \ + (self.DISTRO_MATRIX + self.DISTRO_MATRIX_NOVER)[matrix_index] + config = dict( + os_type=distro, + os_version=version + ) + gp = self.klass("ceph", config) + assert gp.distro == expected + + DIST_RELEASE_MATRIX = [ + ('rhel', '7.0', None, 'el7'), + ('centos', '6.5', None, 'el6'), + 
('centos', '7.0', None, 'el7'), + ('centos', '7.1', None, 'el7'), + ('centos', '8.1', None, 'el8'), + ('fedora', '20', None, 'fc20'), + ('debian', '7.0', None, 'debian'), + ('debian', '7', None, 'debian'), + ('debian', '7.1', None, 'debian'), + ('ubuntu', '12.04', None, 'ubuntu'), + ('ubuntu', '14.04', None, 'ubuntu'), + ('ubuntu', '16.04', None, 'ubuntu'), + ('ubuntu', '18.04', None, 'ubuntu'), + ('ubuntu', '20.04', None, 'ubuntu'), + ] + + @pytest.mark.parametrize( + "matrix_index", + range(len(DIST_RELEASE_MATRIX)), + ) + def test_get_dist_release(self, matrix_index): + (distro, version, codename, expected) = \ + (self.DIST_RELEASE_MATRIX)[matrix_index] + rem = self._get_remote(distro=distro, version=version, + codename=codename) + ctx = dict(foo="bar") + gp = self.klass("ceph", {}, ctx=ctx, remote=rem) + assert gp.dist_release == expected + + +class TestShamanProject(TestBuilderProject): + klass = packaging.ShamanProject + + def setup(self): + self.p_config = patch('teuthology.packaging.config') + self.m_config = self.p_config.start() + self.m_config.use_shaman = True + self.m_config.shaman_host = 'shaman.ceph.com' + self.p_get_config_value = \ + patch('teuthology.packaging._get_config_value_for_remote') + self.m_get_config_value = self.p_get_config_value.start() + self.m_get_config_value.return_value = None + self.p_get = patch('requests.get') + self.m_get = self.p_get.start() + + def teardown(self): + self.p_config.stop() + self.p_get_config_value.stop() + self.p_get.stop() + + def test_init_from_remote_base_url(self): + # Here, we really just need to make sure ShamanProject._search() + # queries the right URL. So let's make _get_base_url() just pass that + # URL through and test that value. + def m_get_base_url(obj): + obj._search() + return self.m_get.call_args_list[0][0][0] + with patch( + 'teuthology.packaging.ShamanProject._get_base_url', + new=m_get_base_url, + ): + super(TestShamanProject, self)\ + .test_init_from_remote_base_url( + "https://shaman.ceph.com/api/search?status=ready" + "&project=ceph&flavor=default" + "&distros=ubuntu%2F20.04%2Fx86_64&ref=main" + ) + + def test_init_from_remote_base_url_debian(self): + # Here, we really just need to make sure ShamanProject._search() + # queries the right URL. So let's make _get_base_url() just pass that + # URL through and test that value. + def m_get_base_url(obj): + obj._search() + return self.m_get.call_args_list[0][0][0] + with patch( + 'teuthology.packaging.ShamanProject._get_base_url', + new=m_get_base_url, + ): + super(TestShamanProject, self)\ + .test_init_from_remote_base_url_debian( + "https://shaman.ceph.com/api/search?status=ready" + "&project=ceph&flavor=default" + "&distros=debian%2F7.1%2Fx86_64&ref=main" + ) + + def test_init_from_config_base_url(self): + # Here, we really just need to make sure ShamanProject._search() + # queries the right URL. So let's make _get_base_url() just pass that + # URL through and test that value. 
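The Shaman search URLs asserted below carry percent-encoded distro strings (distros=ubuntu%2F20.04%2Fx86_64 is just ubuntu/20.04/x86_64). A small standard-library sketch of how that encoding behaves, independent of ShamanProject's internals:

    from urllib.parse import urlencode, unquote

    params = dict(status='ready', project='ceph', flavor='default',
                  distros='ubuntu/20.04/x86_64', ref='main')
    query = urlencode(params)          # '/' is escaped to %2F
    assert 'distros=ubuntu%2F20.04%2Fx86_64' in query
    assert unquote('ubuntu%2F20.04%2Fx86_64') == 'ubuntu/20.04/x86_64'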
+ def m_get_base_url(obj): + obj._search() + return self.m_get.call_args_list[0][0][0] + with patch( + 'teuthology.packaging.ShamanProject._get_base_url', + new=m_get_base_url, + ): + super(TestShamanProject, self).test_init_from_config_base_url( + "https://shaman.ceph.com/api/search?status=ready&project=ceph" \ + "&flavor=default&distros=ubuntu%2F20.04%2Fx86_64&sha1=sha1" + ) + + @patch('teuthology.packaging.ShamanProject._get_package_sha1') + def test_init_from_config_tag_ref(self, m_get_package_sha1): + m_get_package_sha1.return_value = 'the_sha1' + super(TestShamanProject, self).test_init_from_config_tag_ref() + + def test_init_from_config_tag_overrides_branch_ref(self, caplog): + with patch( + 'teuthology.packaging.repo_utils.ls_remote', + ) as m_ls_remote: + m_ls_remote.return_value = 'sha1_from_my_tag' + obj = super(TestShamanProject, self)\ + .test_init_from_config_tag_overrides_branch_ref(caplog) + search_uri = obj._search_uri + assert 'sha1=sha1_from_my_tag' in search_uri + assert 'jewel' not in search_uri + + def test_init_from_config_branch_overrides_sha1(self, caplog): + obj = super(TestShamanProject, self)\ + .test_init_from_config_branch_overrides_sha1(caplog) + search_uri = obj._search_uri + assert 'jewel' in search_uri + assert 'sha1' not in search_uri + + def test_get_package_version_found(self): + resp = Mock() + resp.ok = True + resp.json.return_value = [ + dict( + sha1='the_sha1', + extra=dict(package_manager_version='0.90.0'), + ) + ] + self.m_get.return_value = resp + super(TestShamanProject, self)\ + .test_get_package_version_found() + + def test_get_package_sha1_fetched_found(self): + resp = Mock() + resp.ok = True + resp.json.return_value = [dict(sha1='the_sha1')] + self.m_get.return_value = resp + super(TestShamanProject, self)\ + .test_get_package_sha1_fetched_found() + + def test_get_package_sha1_fetched_not_found(self): + resp = Mock() + resp.json.return_value = [] + self.m_get.return_value = resp + super(TestShamanProject, self)\ + .test_get_package_sha1_fetched_not_found() + + SHAMAN_SEARCH_RESPONSE = [ + { + "status": "ready", + "sha1": "534fc6d936bd506119f9e0921ff8cf8d47caa323", + "extra": { + "build_url": "https://jenkins.ceph.com/job/ceph-dev-build/ARCH=x86_64,AVAILABLE_ARCH=x86_64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic/48556/", + "root_build_cause": "SCMTRIGGER", + "version": "17.0.0-8856-g534fc6d9", + "node_name": "172.21.2.7+braggi07", + "job_name": "ceph-dev-build/ARCH=x86_64,AVAILABLE_ARCH=x86_64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic", + "package_manager_version": "17.0.0-8856.g534fc6d9" + }, + "url": "https://3.chacra.ceph.com/r/ceph/main/534fc6d936bd506119f9e0921ff8cf8d47caa323/centos/8/flavors/default/", + "modified": "2021-11-06 21:40:40.669823", + "distro_version": "8", + "project": "ceph", + "flavor": "default", + "ref": "main", + "chacra_url": "https://3.chacra.ceph.com/repos/ceph/main/534fc6d936bd506119f9e0921ff8cf8d47caa323/centos/8/flavors/default/", + "archs": [ + "x86_64", + "arm64", + "source" + ], + "distro": "centos" + } + ] + + SHAMAN_BUILDS_RESPONSE = [ + { + "status": "completed", + "sha1": "534fc6d936bd506119f9e0921ff8cf8d47caa323", + "distro_arch": "arm64", + "started": "2021-11-06 20:20:15.121203", + "completed": "2021-11-06 22:36:27.115950", + "extra": { + "node_name": "172.21.4.66+confusa04", + "version": "17.0.0-8856-g534fc6d9", + "build_user": "", + "root_build_cause": "SCMTRIGGER", + + "job_name": 
"ceph-dev-build/ARCH=arm64,AVAILABLE_ARCH=arm64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic" + }, + "modified": "2021-11-06 22:36:27.118043", + "distro_version": "8", + "project": "ceph", + "url": "https://jenkins.ceph.com/job/ceph-dev-build/ARCH=arm64,AVAILABLE_ARCH=arm64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic/48556/", + "log_url": "https://jenkins.ceph.com/job/ceph-dev-build/ARCH=arm64,AVAILABLE_ARCH=arm64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic/48556//consoleFull", + "flavor": "default", + "ref": "main", + "distro": "centos" + }, + { + "status": "completed", + "sha1": "534fc6d936bd506119f9e0921ff8cf8d47caa323", + "distro_arch": "x86_64", + "started": "2021-11-06 20:20:06.740692", + "completed": "2021-11-06 21:43:51.711970", + "extra": { + "node_name": "172.21.2.7+braggi07", + "version": "17.0.0-8856-g534fc6d9", + "build_user": "", + "root_build_cause": "SCMTRIGGER", + "job_name": "ceph-dev-build/ARCH=x86_64,AVAILABLE_ARCH=x86_64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic" + }, + "modified": "2021-11-06 21:43:51.713487", + "distro_version": "8", + "project": "ceph", + "url": "https://jenkins.ceph.com/job/ceph-dev-build/ARCH=x86_64,AVAILABLE_ARCH=x86_64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic/48556/", + "log_url": "https://jenkins.ceph.com/job/ceph-dev-build/ARCH=x86_64,AVAILABLE_ARCH=x86_64,AVAILABLE_DIST=centos8,DIST=centos8,MACHINE_SIZE=gigantic/48556//consoleFull", + "flavor": "default", + "ref": "main", + "distro": "centos" + } + ] + + def test_build_complete_success(self): + config = dict( + os_type="centos", + os_version="8", + branch='main', + arch='x86_64', + flavor='default', + ) + builder = self.klass("ceph", config) + + search_resp = Mock() + search_resp.ok = True + search_resp.json.return_value = self.SHAMAN_SEARCH_RESPONSE + self.m_get.return_value = search_resp + # cause builder to call requests.get and cache search_resp + builder.assert_result() + + build_resp = Mock() + build_resp.ok = True + self.m_get.return_value = build_resp + + # both archs completed, so x86_64 build is complete + builds = build_resp.json.return_value = self.SHAMAN_BUILDS_RESPONSE + assert builder.build_complete + + # mark the arm64 build failed, x86_64 should still be complete + builds[0]['status'] = "failed" + build_resp.json.return_value = builds + assert builder.build_complete + + # mark the x86_64 build failed, should show incomplete + builds[1]['status'] = "failed" + build_resp.json.return_value = builds + assert not builder.build_complete + + # mark the arm64 build complete again, x86_64 still incomplete + builds[0]['status'] = "completed" + build_resp.json.return_value = builds + assert not builder.build_complete + + DISTRO_MATRIX = [ + ('rhel', '7.0', None, 'centos/7'), + ('centos', '6.5', None, 'centos/6'), + ('centos', '7.0', None, 'centos/7'), + ('centos', '7.1', None, 'centos/7'), + ('centos', '8.1', None, 'centos/8'), + ('fedora', '20', None, 'fedora/20'), + ('ubuntu', '14.04', 'trusty', 'ubuntu/14.04'), + ('ubuntu', '14.04', None, 'ubuntu/14.04'), + ('debian', '7.0', None, 'debian/7.0'), + ('debian', '7', None, 'debian/7'), + ('debian', '7.1', None, 'debian/7.1'), + ('ubuntu', '12.04', None, 'ubuntu/12.04'), + ('ubuntu', '14.04', None, 'ubuntu/14.04'), + ('ubuntu', '16.04', None, 'ubuntu/16.04'), + ('ubuntu', '18.04', None, 'ubuntu/18.04'), + ('ubuntu', '20.04', None, 'ubuntu/20.04'), + ] + + DISTRO_MATRIX_NOVER = [ + ('rhel', None, None, 'centos/8'), + ('centos', None, None, 'centos/8'), + ('fedora', 
None, None, 'fedora/25'), + ('ubuntu', None, None, 'ubuntu/20.04'), + ('debian', None, None, 'debian/8.0'), + ] diff --git a/teuthology/test/test_parallel.py b/teuthology/test/test_parallel.py new file mode 100644 index 0000000000..bba1d57bf7 --- /dev/null +++ b/teuthology/test/test_parallel.py @@ -0,0 +1,28 @@ +from teuthology.parallel import parallel + + +def identity(item, input_set=None, remove=False): + if input_set is not None: + assert item in input_set + if remove: + input_set.remove(item) + return item + + +class TestParallel(object): + def test_basic(self): + in_set = set(range(10)) + with parallel() as para: + for i in in_set: + para.spawn(identity, i, in_set, remove=True) + assert para.any_spawned is True + assert para.count == len(in_set) + + def test_result(self): + in_set = set(range(10)) + with parallel() as para: + for i in in_set: + para.spawn(identity, i, in_set) + for result in para: + in_set.remove(result) + diff --git a/teuthology/test/test_repo_utils.py b/teuthology/test/test_repo_utils.py new file mode 100644 index 0000000000..a155fd410e --- /dev/null +++ b/teuthology/test/test_repo_utils.py @@ -0,0 +1,242 @@ +import logging +import unittest.mock as mock +import os +import os.path +from pytest import raises, mark +import shutil +import subprocess +import tempfile +from packaging.version import parse + +from teuthology.exceptions import BranchNotFoundError, CommitNotFoundError +from teuthology import repo_utils +from teuthology import parallel +repo_utils.log.setLevel(logging.WARNING) + + +class TestRepoUtils(object): + + @classmethod + def setup_class(cls): + cls.temp_path = tempfile.mkdtemp(prefix='test_repo-') + cls.dest_path = cls.temp_path + '/empty_dest' + cls.src_path = cls.temp_path + '/empty_src' + + if 'TEST_ONLINE' in os.environ: + cls.repo_url = 'https://github.com/ceph/empty.git' + cls.commit = '71245d8e454a06a38a00bff09d8f19607c72e8bf' + else: + cls.repo_url = 'file://' + cls.src_path + cls.commit = None + + cls.git_version = parse( + subprocess.check_output(('git', 'version') + ).decode().strip().split(' ')[-1]) + + @classmethod + def teardown_class(cls): + shutil.rmtree(cls.temp_path) + + def setup_method(self, method): + # In git 2.28.0, the --initial-branch flag was added. 
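The version gate that follows compares parsed versions rather than raw strings; packaging.version.parse (already imported at the top of this test module) is what makes '2.28.0' compare correctly against e.g. '2.9.5':

    from packaging.version import parse

    assert '2.9.5' > '2.28.0'                  # lexicographic string comparison is misleading
    assert parse('2.9.5') < parse('2.28.0')    # numeric, component-wise comparison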
+ if self.git_version >= parse("2.28.0"): + subprocess.check_call( + ('git', 'init', '--initial-branch', 'main', self.src_path) + ) + else: + subprocess.check_call(('git', 'init', self.src_path)) + subprocess.check_call( + ('git', 'checkout', '-b', 'main'), + cwd=self.src_path, + ) + proc = subprocess.Popen( + ('git', 'config', 'user.email', 'test@ceph.com'), + cwd=self.src_path, + stdout=subprocess.PIPE, + ) + assert proc.wait() == 0 + proc = subprocess.Popen( + ('git', 'config', 'user.name', 'Test User'), + cwd=self.src_path, + stdout=subprocess.PIPE, + ) + assert proc.wait() == 0 + proc = subprocess.Popen( + ('git', 'commit', '--allow-empty', '--allow-empty-message', + '--no-edit'), + cwd=self.src_path, + stdout=subprocess.PIPE, + ) + assert proc.wait() == 0 + if not self.commit: + result = subprocess.check_output( + 'git rev-parse HEAD', + shell=True, + cwd=self.src_path, + ).split() + assert result + self.commit = result[0].decode() + + def teardown_method(self, method): + shutil.rmtree(self.src_path, ignore_errors=True) + shutil.rmtree(self.dest_path, ignore_errors=True) + + def test_clone_repo_existing_branch(self): + repo_utils.clone_repo(self.repo_url, self.dest_path, 'main', self.commit) + assert os.path.exists(self.dest_path) + + def test_clone_repo_non_existing_branch(self): + with raises(BranchNotFoundError): + repo_utils.clone_repo(self.repo_url, self.dest_path, 'nobranch', self.commit) + assert not os.path.exists(self.dest_path) + + def test_fetch_no_repo(self): + fake_dest_path = self.temp_path + '/not_a_repo' + assert not os.path.exists(fake_dest_path) + with raises(OSError): + repo_utils.fetch(fake_dest_path) + assert not os.path.exists(fake_dest_path) + + def test_fetch_noop(self): + repo_utils.clone_repo(self.repo_url, self.dest_path, 'main', self.commit) + repo_utils.fetch(self.dest_path) + assert os.path.exists(self.dest_path) + + def test_fetch_branch_no_repo(self): + fake_dest_path = self.temp_path + '/not_a_repo' + assert not os.path.exists(fake_dest_path) + with raises(OSError): + repo_utils.fetch_branch(fake_dest_path, 'main') + assert not os.path.exists(fake_dest_path) + + def test_fetch_branch_fake_branch(self): + repo_utils.clone_repo(self.repo_url, self.dest_path, 'main', self.commit) + with raises(BranchNotFoundError): + repo_utils.fetch_branch(self.dest_path, 'nobranch') + + @mark.parametrize('git_str', + ["fatal: couldn't find remote ref", + "fatal: Couldn't find remote ref"]) + @mock.patch('subprocess.Popen') + def test_fetch_branch_different_git_versions(self, mock_popen, git_str): + """ + Newer git versions return a lower case string + See: https://github.com/git/git/commit/0b9c3afdbfb629363 + """ + branch_name = 'nobranch' + process_mock = mock.Mock() + attrs = { + 'wait.return_value': 1, + 'stdout.read.return_value': f"{git_str} {branch_name}".encode(), + } + process_mock.configure_mock(**attrs) + mock_popen.return_value = process_mock + with raises(BranchNotFoundError): + repo_utils.fetch_branch('', branch_name) + + def test_enforce_existing_branch(self): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main') + assert os.path.exists(self.dest_path) + + def test_enforce_existing_commit(self): + import logging + logging.getLogger().info(subprocess.check_output("git branch", shell=True, cwd=self.src_path)) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + + def test_enforce_non_existing_branch(self): + with raises(BranchNotFoundError): + 
repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'blah', self.commit) + assert not os.path.exists(self.dest_path) + + def test_enforce_non_existing_commit(self): + with raises(CommitNotFoundError): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', 'c69e90807d222c1719c45c8c758bf6fac3d985f1') + assert not os.path.exists(self.dest_path) + + def test_enforce_multiple_calls_same_branch(self): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + + def test_enforce_multiple_calls_different_branches(self): + with raises(BranchNotFoundError): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'blah1') + assert not os.path.exists(self.dest_path) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + with raises(BranchNotFoundError): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'blah2') + assert not os.path.exists(self.dest_path) + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, + 'main', self.commit) + assert os.path.exists(self.dest_path) + + def test_enforce_invalid_branch(self): + with raises(ValueError): + repo_utils.enforce_repo_state(self.repo_url, self.dest_path, 'a b', self.commit) + + def test_simultaneous_access(self): + count = 5 + with parallel.parallel() as p: + for i in range(count): + p.spawn(repo_utils.enforce_repo_state, self.repo_url, + self.dest_path, 'main', self.commit) + for result in p: + assert result is None + + def test_simultaneous_access_different_branches(self): + branches = [('main', self.commit), ('main', self.commit), ('nobranch', 'nocommit'), + ('nobranch', 'nocommit'), ('main', self.commit), ('nobranch', 'nocommit')] + + with parallel.parallel() as p: + for branch, commit in branches: + if branch == 'main': + p.spawn(repo_utils.enforce_repo_state, self.repo_url, + self.dest_path, branch, commit) + else: + dest_path = self.dest_path + '_' + branch + + def func(): + repo_utils.enforce_repo_state( + self.repo_url, dest_path, + branch, commit) + p.spawn( + raises, + BranchNotFoundError, + func, + ) + for result in p: + pass + + URLS_AND_DIRNAMES = [ + ('git@git.ceph.com/ceph-qa-suite.git', 'git.ceph.com_ceph-qa-suite'), + ('git://git.ceph.com/ceph-qa-suite.git', 'git.ceph.com_ceph-qa-suite'), + ('https://github.com/ceph/ceph', 'github.com_ceph_ceph'), + ('https://github.com/liewegas/ceph.git', 'github.com_liewegas_ceph'), + ('file:///my/dir/has/ceph.git', 'my_dir_has_ceph'), + ] + + @mark.parametrize("input_, expected", URLS_AND_DIRNAMES) + def test_url_to_dirname(self, input_, expected): + assert repo_utils.url_to_dirname(input_) == expected + + def test_current_branch(self): + repo_utils.clone_repo(self.repo_url, self.dest_path, 'main', self.commit) + assert repo_utils.current_branch(self.dest_path) == "main" \ No newline at end of file diff --git a/teuthology/test/test_report.py b/teuthology/test/test_report.py new file mode 100644 index 0000000000..1a33b12b4b --- /dev/null +++ b/teuthology/test/test_report.py @@ -0,0 +1,77 @@ +import yaml +import json +from 
teuthology.test import fake_archive +from teuthology import report + + +class TestSerializer(object): + def setup(self): + self.archive = fake_archive.FakeArchive() + self.archive.setup() + self.archive_base = self.archive.archive_base + self.reporter = report.ResultsReporter(archive_base=self.archive_base) + + def teardown(self): + self.archive.teardown() + + def test_all_runs_one_run(self): + run_name = "test_all_runs" + yaml_path = "examples/3node_ceph.yaml" + job_count = 3 + self.archive.create_fake_run(run_name, job_count, yaml_path) + assert [run_name] == self.reporter.serializer.all_runs + + def test_all_runs_three_runs(self): + run_count = 3 + runs = {} + for i in range(run_count): + run_name = "run #%s" % i + yaml_path = "examples/3node_ceph.yaml" + job_count = 3 + job_ids = self.archive.create_fake_run( + run_name, + job_count, + yaml_path) + runs[run_name] = job_ids + assert sorted(runs.keys()) == sorted(self.reporter.serializer.all_runs) + + def test_jobs_for_run(self): + run_name = "test_jobs_for_run" + yaml_path = "examples/3node_ceph.yaml" + job_count = 3 + jobs = self.archive.create_fake_run(run_name, job_count, yaml_path) + job_ids = [str(job['job_id']) for job in jobs] + + got_jobs = self.reporter.serializer.jobs_for_run(run_name) + assert sorted(job_ids) == sorted(got_jobs.keys()) + + def test_running_jobs_for_run(self): + run_name = "test_jobs_for_run" + yaml_path = "examples/3node_ceph.yaml" + job_count = 10 + num_hung = 3 + self.archive.create_fake_run(run_name, job_count, yaml_path, + num_hung=num_hung) + + got_jobs = self.reporter.serializer.running_jobs_for_run(run_name) + assert len(got_jobs) == num_hung + + def test_json_for_job(self): + run_name = "test_json_for_job" + yaml_path = "examples/3node_ceph.yaml" + job_count = 1 + jobs = self.archive.create_fake_run(run_name, job_count, yaml_path) + job = jobs[0] + + with open(yaml_path) as yaml_file: + obj_from_yaml = yaml.safe_load(yaml_file) + full_obj = obj_from_yaml.copy() + full_obj.update(job['info']) + full_obj.update(job['summary']) + + out_json = self.reporter.serializer.json_for_job( + run_name, str(job['job_id'])) + out_obj = json.loads(out_json) + assert full_obj == out_obj + + diff --git a/teuthology/test/test_results.py b/teuthology/test/test_results.py new file mode 100644 index 0000000000..f1dc58747c --- /dev/null +++ b/teuthology/test/test_results.py @@ -0,0 +1,155 @@ +import textwrap +from teuthology.config import config +from teuthology import results +from teuthology import report + +from unittest.mock import patch, DEFAULT + + +class TestResultsEmail(object): + reference = { + 'run_name': 'test_name', + 'jobs': [ + # Running + {'description': 'description for job with name test_name', + 'job_id': 30481, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/30481/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': None, + 'status': 'running', + }, + # Waiting + {'description': 'description for job with name test_name', + 'job_id': 62965, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/30481/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': None, + 'status': 'waiting', + }, + # Queued + {'description': 'description for job with name test_name', + 'job_id': 79063, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/30481/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': None, + 'status': 'queued', + }, + # Failed + {'description': 'description for job with name 
test_name', + 'job_id': 88979, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/88979/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': 35190, + 'success': False, + 'status': 'fail', + 'failure_reason': 'Failure reason!', + }, + # Dead + {'description': 'description for job with name test_name', + 'job_id': 69152, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/69152/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': 5225, + 'success': False, + 'status': 'dead', + 'failure_reason': 'Dead reason!', + }, + # Passed + {'description': 'description for job with name test_name', + 'job_id': 68369, + 'name': 'test_name', + 'log_href': 'http://qa-proxy.ceph.com/teuthology/test_name/68369/teuthology.log', # noqa + 'owner': 'job@owner', + 'duration': 33771, + 'success': True, + 'status': 'pass', + }, + ], + 'subject': '1 fail, 1 dead, 1 running, 1 waiting, 1 queued, 1 pass in test_name', # noqa + 'body': textwrap.dedent(""" + Test Run: test_name + ================================================================= + info: http://example.com/test_name/ + logs: http://qa-proxy.ceph.com/teuthology/test_name/ + failed: 1 + dead: 1 + running: 1 + waiting: 1 + queued: 1 + passed: 1 + + + Fail + ================================================================= + [88979] description for job with name test_name + ----------------------------------------------------------------- + time: 09:46:30 + info: http://example.com/test_name/88979/ + log: http://qa-proxy.ceph.com/teuthology/test_name/88979/ + + Failure reason! + + + + Dead + ================================================================= + [69152] description for job with name test_name + ----------------------------------------------------------------- + time: 01:27:05 + info: http://example.com/test_name/69152/ + log: http://qa-proxy.ceph.com/teuthology/test_name/69152/ + + Dead reason! 
+ + + + Running + ================================================================= + [30481] description for job with name test_name + info: http://example.com/test_name/30481/ + + + + Waiting + ================================================================= + [62965] description for job with name test_name + info: http://example.com/test_name/62965/ + + + + Queued + ================================================================= + [79063] description for job with name test_name + info: http://example.com/test_name/79063/ + + + + Pass + ================================================================= + [68369] description for job with name test_name + time: 09:22:51 + info: http://example.com/test_name/68369/ + """).strip(), + } + + def setup(self): + config.results_ui_server = "http://example.com/" + config.archive_server = "http://qa-proxy.ceph.com/teuthology/" + + def test_build_email_body(self): + run_name = self.reference['run_name'] + with patch.multiple( + report, + ResultsReporter=DEFAULT, + ): + reporter = report.ResultsReporter() + reporter.get_jobs.return_value = self.reference['jobs'] + (subject, body) = results.build_email_body( + run_name, _reporter=reporter) + assert subject == self.reference['subject'] + assert body == self.reference['body'] diff --git a/teuthology/test/test_run.py b/teuthology/test/test_run.py new file mode 100644 index 0000000000..7a452f6872 --- /dev/null +++ b/teuthology/test/test_run.py @@ -0,0 +1,247 @@ +import pytest +import docopt + +from unittest.mock import patch, call, Mock + +from teuthology import run +from scripts import run as scripts_run +from teuthology.test import skipif_teuthology_process + + +class TestRun(object): + """ Tests for teuthology.run """ + + @patch("teuthology.log.setLevel") + @patch("teuthology.setup_log_file") + @patch("os.mkdir") + def test_set_up_logging(self, m_mkdir, m_setup_log_file, m_setLevel): + run.set_up_logging(True, "path/to/archive") + m_mkdir.assert_called_with("path/to/archive") + m_setup_log_file.assert_called_with("path/to/archive/teuthology.log") + assert m_setLevel.called + + # because of how we import things, mock merge_configs from run - where it's used + # see: http://www.voidspace.org.uk/python/mock/patch.html#where-to-patch + @patch("teuthology.run.merge_configs") + def test_setup_config(self, m_merge_configs): + config = {"job_id": 1, "foo": "bar"} + m_merge_configs.return_value = config + result = run.setup_config(["some/config.yaml"]) + assert m_merge_configs.called + assert result["job_id"] == "1" + assert result["foo"] == "bar" + + @patch("teuthology.run.merge_configs") + def test_setup_config_targets_ok(self, m_merge_configs): + config = {"targets": list(range(4)), "roles": list(range(2))} + m_merge_configs.return_value = config + result = run.setup_config(["some/config.yaml"]) + assert result["targets"] == [0, 1, 2, 3] + assert result["roles"] == [0, 1] + + @patch("teuthology.run.merge_configs") + def test_setup_config_targets_invalid(self, m_merge_configs): + config = {"targets": range(2), "roles": range(4)} + m_merge_configs.return_value = config + with pytest.raises(AssertionError): + run.setup_config(["some/config.yaml"]) + + @patch("teuthology.run.open") + def test_write_initial_metadata(self, m_open): + config = {"job_id": "123", "foo": "bar"} + run.write_initial_metadata( + "some/archive/dir", + config, + "the_name", + "the_description", + "the_owner", + ) + expected = [ + call('some/archive/dir/pid', 'w'), + call('some/archive/dir/owner', 'w'), + 
call('some/archive/dir/orig.config.yaml', 'w'), + call('some/archive/dir/info.yaml', 'w') + ] + assert m_open.call_args_list == expected + + def test_get_machine_type(self): + result = run.get_machine_type(None, {"machine-type": "the_machine_type"}) + assert result == "the_machine_type" + + def test_get_summary(self): + result = run.get_summary("the_owner", "the_description") + assert result == {"owner": "the_owner", "description": "the_description", "success": True} + result = run.get_summary("the_owner", None) + assert result == {"owner": "the_owner", "success": True} + + def test_validate_tasks_invalid(self): + config = {"tasks": [{"kernel": "can't be here"}]} + with pytest.raises(AssertionError) as excinfo: + run.validate_tasks(config) + assert excinfo.value.args[0].startswith("kernel installation") + + def test_validate_task_no_tasks(self): + result = run.validate_tasks({}) + assert result == [] + + def test_validate_tasks_valid(self): + expected = [{"foo": "bar"}, {"bar": "foo"}] + result = run.validate_tasks({"tasks": expected}) + assert result == expected + + def test_validate_tasks_is_list(self): + with pytest.raises(AssertionError) as excinfo: + run.validate_tasks({"tasks": {"foo": "bar"}}) + assert excinfo.value.args[0].startswith("Expected list") + + def test_get_initial_tasks_invalid(self): + with pytest.raises(AssertionError) as excinfo: + run.get_initial_tasks(True, {"targets": "can't be here", + "roles": "roles" }, "machine_type") + assert excinfo.value.args[0].startswith("You cannot") + + def test_get_inital_tasks(self): + config = {"roles": range(2), "kernel": "the_kernel", "use_existing_cluster": False} + result = run.get_initial_tasks(True, config, "machine_type") + assert {"internal.lock_machines": (2, "machine_type")} in result + assert {"kernel": "the_kernel"} in result + # added because use_existing_cluster == False + assert {'internal.vm_setup': None} in result + assert {'internal.buildpackages_prep': None} in result + + # When tests are run in a teuthology process using the py.test + # API, tasks will have already been imported. Patching sys.path + # (and even calling sys.path_importer_cache.clear()) doesn't seem + # to help "forget" where the tasks are, keeping this test from + # passing. The test isn't critical to run in every single + # environment, so skip. 
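The decorator applied just below is imported from teuthology.test; a minimal sketch of how such a conditional-skip marker can be built with pytest.mark.skipif (the real skipif_teuthology_process may be implemented differently):

    import sys
    import pytest

    # Skip when the suite is being driven from inside a teuthology process,
    # where the task modules have already been imported (assumed condition;
    # the actual check in teuthology.test may differ).
    skipif_teuthology_process = pytest.mark.skipif(
        sys.argv[0].endswith('teuthology'),
        reason='tasks already imported by this process',
    )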
+ @skipif_teuthology_process + @patch("teuthology.run.fetch_qa_suite") + def test_fetch_tasks_if_needed(self, m_fetch_qa_suite): + config = {"suite_path": "/some/suite/path", "suite_branch": "feature_branch", + "suite_sha1": "commit"} + m_fetch_qa_suite.return_value = "/some/other/suite/path" + result = run.fetch_tasks_if_needed(config) + m_fetch_qa_suite.assert_called_with("feature_branch", commit="commit") + assert result == "/some/other/suite/path/qa" + + @patch("teuthology.run.get_status") + @patch("teuthology.run.nuke") + @patch("yaml.safe_dump") + @patch("teuthology.report.try_push_job_info") + @patch("teuthology.run.email_results") + @patch("teuthology.run.open") + @patch("sys.exit") + def test_report_outcome(self, m_sys_exit, m_open, m_email_results, m_try_push_job_info, m_safe_dump, m_nuke, m_get_status): + m_get_status.return_value = "fail" + fake_ctx = Mock() + summary = {"failure_reason": "reasons"} + summary_dump = "failure_reason: reasons\n" + config = {"nuke-on-error": True, "email-on-error": True} + config_dump = "nuke-on-error: true\nemail-on-error: true\n" + m_safe_dump.side_effect = [None, summary_dump, config_dump] + run.report_outcome(config, "the/archive/path", summary, fake_ctx) + assert m_nuke.called + m_try_push_job_info.assert_called_with(config, summary) + m_open.assert_called_with("the/archive/path/summary.yaml", "w") + assert m_email_results.called + assert m_open.called + assert m_sys_exit.called + + @patch("teuthology.run.set_up_logging") + @patch("teuthology.run.setup_config") + @patch("teuthology.run.get_user") + @patch("teuthology.run.write_initial_metadata") + @patch("teuthology.report.try_push_job_info") + @patch("teuthology.run.get_machine_type") + @patch("teuthology.run.get_summary") + @patch("yaml.safe_dump") + @patch("teuthology.run.validate_tasks") + @patch("teuthology.run.get_initial_tasks") + @patch("teuthology.run.fetch_tasks_if_needed") + @patch("teuthology.run.run_tasks") + @patch("teuthology.run.report_outcome") + def test_main(self, m_report_outcome, m_run_tasks, m_fetch_tasks_if_needed, m_get_initial_tasks, m_validate_tasks, + m_safe_dump, m_get_summary, m_get_machine_type, m_try_push_job_info, m_write_initial_metadata, + m_get_user, m_setup_config, m_set_up_logging): + """ This really should be an integration test of some sort. 
""" + config = {"job_id": 1} + m_setup_config.return_value = config + m_get_machine_type.return_value = "machine_type" + doc = scripts_run.__doc__ + args = docopt.docopt(doc, [ + "--verbose", + "--archive", "some/archive/dir", + "--description", "the_description", + "--lock", + "--os-type", "os_type", + "--os-version", "os_version", + "--block", + "--name", "the_name", + "--suite-path", "some/suite/dir", + "path/to/config.yml", + ]) + m_get_user.return_value = "the_owner" + m_get_summary.return_value = dict(success=True, owner="the_owner", description="the_description") + m_validate_tasks.return_value = ['task3'] + m_get_initial_tasks.return_value = ['task1', 'task2'] + m_fetch_tasks_if_needed.return_value = "some/suite/dir" + run.main(args) + m_set_up_logging.assert_called_with(True, "some/archive/dir") + m_setup_config.assert_called_with(["path/to/config.yml"]) + m_write_initial_metadata.assert_called_with( + "some/archive/dir", + config, + "the_name", + "the_description", + "the_owner" + ) + m_try_push_job_info.assert_called_with(config, dict(status='running')) + m_get_machine_type.assert_called_with(None, config) + m_get_summary.assert_called_with("the_owner", "the_description") + m_get_initial_tasks.assert_called_with(True, config, "machine_type") + m_fetch_tasks_if_needed.assert_called_with(config) + assert m_report_outcome.called + args, kwargs = m_run_tasks.call_args + fake_ctx = kwargs["ctx"]._conf + # fields that must be in ctx for the tasks to behave + expected_ctx = ["verbose", "archive", "description", "owner", "lock", "machine_type", "os_type", "os_version", + "block", "name", "suite_path", "config", "summary"] + for key in expected_ctx: + assert key in fake_ctx + assert isinstance(fake_ctx["config"], dict) + assert isinstance(fake_ctx["summary"], dict) + assert "tasks" in fake_ctx["config"] + # ensures that values missing in args are added with the correct value + assert fake_ctx["owner"] == "the_owner" + assert fake_ctx["machine_type"] == "machine_type" + # ensures os_type and os_version are property overwritten + assert fake_ctx["config"]["os_type"] == "os_type" + assert fake_ctx["config"]["os_version"] == "os_version" + + def test_get_teuthology_command(self): + doc = scripts_run.__doc__ + args = docopt.docopt(doc, [ + "--archive", "some/archive/dir", + "--description", "the_description", + "--lock", + "--block", + "--name", "the_name", + "--suite-path", "some/suite/dir", + "path/to/config.yml", "path/to/config2.yaml", + ]) + result = run.get_teuthology_command(args) + result = result.split() + expected = [ + "teuthology", + "path/to/config.yml", "path/to/config2.yaml", + "--suite-path", "some/suite/dir", + "--lock", + "--description", "the_description", + "--name", "the_name", + "--block", + "--archive", "some/archive/dir", + ] + assert len(result) == len(expected) + for arg in expected: + assert arg in result diff --git a/teuthology/test/test_safepath.py b/teuthology/test/test_safepath.py new file mode 100644 index 0000000000..afc81cdad3 --- /dev/null +++ b/teuthology/test/test_safepath.py @@ -0,0 +1,55 @@ +from teuthology import safepath + +class TestSafepath(object): + def test_simple(self): + got = safepath.munge('foo') + assert got == 'foo' + + def test_empty(self): + # really odd corner case + got = safepath.munge('') + assert got == '_' + + def test_slash(self): + got = safepath.munge('/') + assert got == '_' + + def test_slashslash(self): + got = safepath.munge('//') + assert got == '_' + + def test_absolute(self): + got = safepath.munge('/evil') + assert got == 
'evil' + + def test_absolute_subdir(self): + got = safepath.munge('/evil/here') + assert got == 'evil/here' + + def test_dot_leading(self): + got = safepath.munge('./foo') + assert got == 'foo' + + def test_dot_middle(self): + got = safepath.munge('evil/./foo') + assert got == 'evil/foo' + + def test_dot_trailing(self): + got = safepath.munge('evil/foo/.') + assert got == 'evil/foo' + + def test_dotdot(self): + got = safepath.munge('../evil/foo') + assert got == '_./evil/foo' + + def test_dotdot_subdir(self): + got = safepath.munge('evil/../foo') + assert got == 'evil/_./foo' + + def test_hidden(self): + got = safepath.munge('.evil') + assert got == '_evil' + + def test_hidden_subdir(self): + got = safepath.munge('foo/.evil') + assert got == 'foo/_evil' diff --git a/teuthology/test/test_schedule.py b/teuthology/test/test_schedule.py new file mode 100644 index 0000000000..dd0a68f845 --- /dev/null +++ b/teuthology/test/test_schedule.py @@ -0,0 +1,45 @@ +from teuthology.schedule import build_config +from teuthology.misc import get_user + + +class TestSchedule(object): + basic_args = { + '--verbose': False, + '--owner': 'OWNER', + '--description': 'DESC', + '--email': 'EMAIL', + '--first-in-suite': False, + '--last-in-suite': True, + '--name': 'NAME', + '--worker': 'tala', + '--timeout': '6', + '--priority': '99', + # TODO: make this work regardless of $PWD + #'': ['../../examples/3node_ceph.yaml', + # '../../examples/3node_rgw.yaml'], + } + + def test_basic(self): + expected = { + 'description': 'DESC', + 'email': 'EMAIL', + 'first_in_suite': False, + 'last_in_suite': True, + 'machine_type': 'tala', + 'name': 'NAME', + 'owner': 'OWNER', + 'priority': 99, + 'results_timeout': '6', + 'verbose': False, + 'tube': 'tala', + } + + job_dict = build_config(self.basic_args) + assert job_dict == expected + + def test_owner(self): + args = self.basic_args + args['--owner'] = None + job_dict = build_config(self.basic_args) + assert job_dict['owner'] == 'scheduled_%s' % get_user() + diff --git a/teuthology/test/test_scrape.py b/teuthology/test/test_scrape.py new file mode 100644 index 0000000000..f8a03520ec --- /dev/null +++ b/teuthology/test/test_scrape.py @@ -0,0 +1,167 @@ +from __future__ import with_statement + +import os +import shutil +import tempfile +import yaml +from teuthology import scrape + +class FakeResultDir(object): + """Mocks a Result Directory""" + + def __init__(self, + failure_reason="Dummy reason", + assertion="FAILED assert 1 == 2\n", + blank_backtrace=False + ): + self.failure_reason = failure_reason + self.assertion = assertion + self.blank_backtrace = blank_backtrace + self.path = tempfile.mkdtemp() + + with open(os.path.join(self.path, "config.yaml"), "w") as f: + yaml.dump({"description": "Dummy test"}, f) + + with open(os.path.join(self.path, "summary.yaml"), "w") as f: + yaml.dump({ + "success": "false", + "failure_reason": self.failure_reason + }, f) + + with open(os.path.join(self.path, "teuthology.log"), "w") as f: + if not self.blank_backtrace: + f.write(" ceph version 1000\n") + f.write(".stderr: Dummy error\n") + f.write(self.assertion) + f.write(" NOTE: a copy of the executable dummy text\n") + + def __enter__(self): + return self + + def __exit__(self, exc_typ, exc_val, exc_tb): + shutil.rmtree(self.path) + +class TestScrape(object): + """Tests for teuthology.scrape""" + + def test_grep(self): + with FakeResultDir() as d: + filepath = os.path.join(d.path, "scrapetest.txt") + with open(filepath, 'w') as f: + f.write("Ceph is an open-source software storage platform\n\ + 
Teuthology is used for testing.") + + #System level grep is called + value1 = scrape.grep(filepath, "software") + value2 = scrape.grep(filepath, "device") + + assert value1 ==\ + ['Ceph is an open-source software storage platform', ''] + assert value2 == [] + + def test_job(self): + with FakeResultDir() as d: + job = scrape.Job(d.path, 1) + assert job.get_success() == "false" + assert job.get_assertion() == "FAILED assert 1 == 2" + assert job.get_last_tlog_line() ==\ + b"NOTE: a copy of the executable dummy text" + assert job.get_failure_reason() == "Dummy reason" + + def test_timeoutreason(self): + with FakeResultDir(failure_reason=\ + "status 124: timeout '123 /home/ubuntu/cephtest/workunit.client.0/cephtool/test.sh'") as d: + job = scrape.Job(d.path, 1) + assert scrape.TimeoutReason.could_be(job) + assert scrape.TimeoutReason(job).match(job) + + def test_deadreason(self): + with FakeResultDir() as d: + job = scrape.Job(d.path, 1) + #Summary is present + #So this cannot be a DeadReason + assert not scrape.DeadReason.could_be(job) + + def test_lockdepreason(self): + lkReason = None + with FakeResultDir(assertion=\ + "FAILED assert common/lockdep reason\n") as d: + job = scrape.Job(d.path, 1) + assert scrape.LockdepReason.could_be(job) + + lkReason = scrape.LockdepReason(job) + #Backtraces of same jobs must match 100% + assert lkReason.match(job) + with FakeResultDir(blank_backtrace=True) as d: + #Corresponding to 0% match + assert not lkReason.match(scrape.Job(d.path, 2)) + + def test_assertionreason(self): + with FakeResultDir() as d: + job = scrape.Job(d.path, 1) + assert scrape.AssertionReason.could_be(job) + + def test_genericreason(self): + d1 = FakeResultDir(blank_backtrace=True) + d2 = FakeResultDir(failure_reason="Dummy dummy") + d3 = FakeResultDir() + + job1 = scrape.Job(d1.path, 1) + job2 = scrape.Job(d2.path, 2) + job3 = scrape.Job(d3.path, 3) + + reason = scrape.GenericReason(job3) + + assert reason.match(job2) + assert not reason.match(job1) + + shutil.rmtree(d1.path) + shutil.rmtree(d2.path) + shutil.rmtree(d3.path) + + def test_valgrindreason(self): + vreason = None + with FakeResultDir( + failure_reason="saw valgrind issues", + assertion="2014-08-22T20:07:18.668 ERROR:tasks.ceph:saw valgrind issue Leak_DefinitelyLost in /var/log/ceph/valgrind/osd.3.log.gz\n" + ) as d: + job = scrape.Job(d.path, 1) + assert scrape.ValgrindReason.could_be(job) + + vreason = scrape.ValgrindReason(job) + assert vreason.match(job) + + def test_give_me_a_reason(self): + with FakeResultDir() as d: + job = scrape.Job(d.path, 1) + + assert type(scrape.give_me_a_reason(job)) == scrape.AssertionReason + + #Test the lockdep ordering + with FakeResultDir(assertion=\ + "FAILED assert common/lockdep reason\n") as d: + job = scrape.Job(d.path, 1) + assert type(scrape.give_me_a_reason(job)) == scrape.LockdepReason + + def test_scraper(self): + d = FakeResultDir() + os.mkdir(os.path.join(d.path, "test")) + shutil.move( + os.path.join(d.path, "config.yaml"), + os.path.join(d.path, "test", "config.yaml") + ) + shutil.move( + os.path.join(d.path, "summary.yaml"), + os.path.join(d.path, "test", "summary.yaml") + ) + shutil.move( + os.path.join(d.path, "teuthology.log"), + os.path.join(d.path, "test", "teuthology.log") + ) + + scrape.Scraper(d.path).analyze() + + #scrape.log should be created + assert os.path.exists(os.path.join(d.path, "scrape.log")) + + shutil.rmtree(d.path) diff --git a/teuthology/test/test_timer.py b/teuthology/test/test_timer.py new file mode 100644 index 0000000000..312a9c8b86 --- 
/dev/null +++ b/teuthology/test/test_timer.py @@ -0,0 +1,80 @@ +from teuthology import timer + +from unittest.mock import MagicMock, patch, mock_open +from time import time + + +class TestTimer(object): + def test_data_empty(self): + self.timer = timer.Timer() + assert self.timer.data == dict() + + def test_data_one_mark(self): + self.timer = timer.Timer() + # Avoid failing if ~1ms elapses between these two calls + self.timer.precision = 2 + self.timer.mark() + assert len(self.timer.data['marks']) == 1 + assert self.timer.data['marks'][0]['interval'] == 0 + assert self.timer.data['marks'][0]['message'] == '' + + def test_data_five_marks(self): + self.timer = timer.Timer() + for i in range(5): + self.timer.mark(str(i)) + assert len(self.timer.data['marks']) == 5 + assert [m['message'] for m in self.timer.data['marks']] == \ + ['0', '1', '2', '3', '4'] + + def test_intervals(self): + fake_time = MagicMock() + with patch('teuthology.timer.time.time', fake_time): + self.timer = timer.Timer() + now = start_time = fake_time.return_value = time() + intervals = [0, 1, 1, 2, 3, 5, 8] + for i in intervals: + now += i + fake_time.return_value = now + self.timer.mark(str(i)) + + summed_intervals = [sum(intervals[:x + 1]) for x in range(len(intervals))] + result_intervals = [m['interval'] for m in self.timer.data['marks']] + assert result_intervals == summed_intervals + assert self.timer.data['start'] == \ + self.timer.get_datetime_string(start_time) + assert self.timer.data['end'] == \ + self.timer.get_datetime_string(start_time + summed_intervals[-1]) + assert [m['message'] for m in self.timer.data['marks']] == \ + [str(i) for i in intervals] + assert self.timer.data['elapsed'] == summed_intervals[-1] + + def test_write(self): + _path = '/path' + _safe_dump = MagicMock(name='safe_dump') + with patch('teuthology.timer.yaml.safe_dump', _safe_dump): + with patch('teuthology.timer.open', mock_open(), create=True) as _open: + self.timer = timer.Timer(path=_path) + assert self.timer.path == _path + self.timer.write() + _open.assert_called_once_with(_path, 'w') + _safe_dump.assert_called_once_with( + dict(), + _open.return_value.__enter__.return_value, + default_flow_style=False, + ) + + def test_sync(self): + _path = '/path' + _safe_dump = MagicMock(name='safe_dump') + with patch('teuthology.timer.yaml.safe_dump', _safe_dump): + with patch('teuthology.timer.open', mock_open(), create=True) as _open: + self.timer = timer.Timer(path=_path, sync=True) + assert self.timer.path == _path + assert self.timer.sync is True + self.timer.mark() + _open.assert_called_once_with(_path, 'w') + _safe_dump.assert_called_once_with( + self.timer.data, + _open.return_value.__enter__.return_value, + default_flow_style=False, + ) diff --git a/teuthology/test/test_vps_os_vers_parameter_checking.py b/teuthology/test/test_vps_os_vers_parameter_checking.py new file mode 100644 index 0000000000..43ad5ae303 --- /dev/null +++ b/teuthology/test/test_vps_os_vers_parameter_checking.py @@ -0,0 +1,84 @@ +from unittest.mock import patch, Mock + +import teuthology.lock.util +from teuthology import provision + + +class TestVpsOsVersionParamCheck(object): + + def setup(self): + self.fake_ctx = Mock() + self.fake_ctx.machine_type = 'vps' + self.fake_ctx.num_to_lock = 1 + self.fake_ctx.lock = False + + def fake_downburst_executable(): + return '' + + self.fake_downburst_executable = fake_downburst_executable + + def test_ubuntu_precise(self): + self.fake_ctx.os_type = 'ubuntu' + self.fake_ctx.os_version = 'precise' + with patch.multiple( + 
provision.downburst, + downburst_executable=self.fake_downburst_executable, + ): + check_value = teuthology.lock.util.vps_version_or_type_valid( + self.fake_ctx.machine_type, + self.fake_ctx.os_type, + self.fake_ctx.os_version) + + assert check_value + + def test_ubuntu_number(self): + self.fake_ctx.os_type = 'ubuntu' + self.fake_ctx.os_version = '12.04' + with patch.multiple( + provision.downburst, + downburst_executable=self.fake_downburst_executable, + ): + check_value = teuthology.lock.util.vps_version_or_type_valid( + self.fake_ctx.machine_type, + self.fake_ctx.os_type, + self.fake_ctx.os_version) + assert check_value + + def test_mixup(self): + self.fake_ctx.os_type = '6.5' + self.fake_ctx.os_version = 'rhel' + with patch.multiple( + provision.downburst, + downburst_executable=self.fake_downburst_executable, + ): + check_value = teuthology.lock.util.vps_version_or_type_valid( + self.fake_ctx.machine_type, + self.fake_ctx.os_type, + self.fake_ctx.os_version) + assert not check_value + + def test_bad_type(self): + self.fake_ctx.os_type = 'aardvark' + self.fake_ctx.os_version = '6.5' + with patch.multiple( + provision.downburst, + downburst_executable=self.fake_downburst_executable, + ): + check_value = teuthology.lock.util.vps_version_or_type_valid( + self.fake_ctx.machine_type, + self.fake_ctx.os_type, + self.fake_ctx.os_version) + assert not check_value + + def test_bad_version(self): + self.fake_ctx.os_type = 'rhel' + self.fake_ctx.os_version = 'vampire_bat' + with patch.multiple( + provision.downburst, + downburst_executable=self.fake_downburst_executable, + ): + check_value = teuthology.lock.util.vps_version_or_type_valid( + self.fake_ctx.machine_type, + self.fake_ctx.os_type, + self.fake_ctx.os_version) + assert not check_value diff --git a/teuthology/test/test_worker.py b/teuthology/test/test_worker.py new file mode 100644 index 0000000000..87d3af88ca --- /dev/null +++ b/teuthology/test/test_worker.py @@ -0,0 +1,307 @@ +import beanstalkc +import os + +from unittest.mock import patch, Mock, MagicMock +from datetime import datetime, timedelta + +from teuthology import worker + +from teuthology.contextutil import MaxWhileTries + + +class TestWorker(object): + def setup(self): + self.ctx = Mock() + self.ctx.verbose = True + self.ctx.archive_dir = '/archive/dir' + self.ctx.log_dir = '/log/dir' + self.ctx.tube = 'tube' + + @patch("os.path.exists") + def test_restart_file_path_doesnt_exist(self, m_exists): + m_exists.return_value = False + result = worker.sentinel(worker.restart_file_path) + assert not result + + @patch("os.path.getmtime") + @patch("os.path.exists") + @patch("teuthology.worker.datetime") + def test_needs_restart(self, m_datetime, m_exists, m_getmtime): + m_exists.return_value = True + m_datetime.utcfromtimestamp.return_value = datetime.utcnow() + timedelta(days=1) + result = worker.sentinel(worker.restart_file_path) + assert result + + @patch("os.path.getmtime") + @patch("os.path.exists") + @patch("teuthology.worker.datetime") + def test_does_not_need_restart(self, m_datetime, m_exists, getmtime): + m_exists.return_value = True + m_datetime.utcfromtimestamp.return_value = datetime.utcnow() - timedelta(days=1) + result = worker.sentinel(worker.restart_file_path) + assert not result + + @patch("os.symlink") + def test_symlink_success(self, m_symlink): + worker.symlink_worker_log("path/to/worker.log", "path/to/archive") + m_symlink.assert_called_with("path/to/worker.log", "path/to/archive/worker.log") + + @patch("teuthology.worker.log") + @patch("os.symlink") + def 
test_symlink_failure(self, m_symlink, m_log): + m_symlink.side_effect = IOError + worker.symlink_worker_log("path/to/worker.log", "path/to/archive") + # actually logs the exception + assert m_log.exception.called + + @patch("teuthology.worker.run_with_watchdog") + @patch("teuthology.worker.teuth_config") + @patch("subprocess.Popen") + @patch("os.environ") + @patch("os.mkdir") + @patch("yaml.safe_dump") + @patch("tempfile.NamedTemporaryFile") + def test_run_job_with_watchdog(self, m_tempfile, m_safe_dump, m_mkdir, + m_environ, m_popen, m_t_config, + m_run_watchdog): + config = { + "suite_path": "suite/path", + "config": {"foo": "bar"}, + "verbose": True, + "owner": "the_owner", + "archive_path": "archive/path", + "name": "the_name", + "description": "the_description", + "job_id": "1", + } + m_tmp = MagicMock() + temp_file = Mock() + temp_file.name = "the_name" + m_tmp.__enter__.return_value = temp_file + m_tempfile.return_value = m_tmp + env = dict(PYTHONPATH="python/path") + m_environ.copy.return_value = env + m_p = Mock() + m_p.returncode = 0 + m_popen.return_value = m_p + m_t_config.results_server = True + worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False) + m_run_watchdog.assert_called_with(m_p, config) + expected_args = [ + 'teuth/bin/path/teuthology', + '-v', + '--lock', + '--block', + '--owner', 'the_owner', + '--archive', 'archive/path', + '--name', 'the_name', + '--description', + 'the_description', + '--', + "the_name" + ] + m_popen.assert_called_with(args=expected_args, env=env) + + @patch("time.sleep") + @patch("teuthology.worker.symlink_worker_log") + @patch("teuthology.worker.teuth_config") + @patch("subprocess.Popen") + @patch("os.environ") + @patch("os.mkdir") + @patch("yaml.safe_dump") + @patch("tempfile.NamedTemporaryFile") + def test_run_job_no_watchdog(self, m_tempfile, m_safe_dump, m_mkdir, + m_environ, m_popen, m_t_config, m_symlink_log, + m_sleep): + config = { + "suite_path": "suite/path", + "config": {"foo": "bar"}, + "verbose": True, + "owner": "the_owner", + "archive_path": "archive/path", + "name": "the_name", + "description": "the_description", + "worker_log": "worker/log.log", + "job_id": "1", + } + m_tmp = MagicMock() + temp_file = Mock() + temp_file.name = "the_name" + m_tmp.__enter__.return_value = temp_file + m_tempfile.return_value = m_tmp + env = dict(PYTHONPATH="python/path") + m_environ.copy.return_value = env + m_p = Mock() + m_p.returncode = 1 + m_popen.return_value = m_p + m_t_config.results_server = False + worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False) + m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"]) + + @patch("teuthology.worker.report.try_push_job_info") + @patch("teuthology.worker.symlink_worker_log") + @patch("time.sleep") + def test_run_with_watchdog_no_reporting(self, m_sleep, m_symlink_log, m_try_push): + config = { + "name": "the_name", + "job_id": "1", + "worker_log": "worker_log", + "archive_path": "archive/path", + "teuthology_branch": "main" + } + process = Mock() + process.poll.return_value = "not None" + worker.run_with_watchdog(process, config) + m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"]) + m_try_push.assert_called_with( + dict(name=config["name"], job_id=config["job_id"]), + dict(status='dead') + ) + + @patch("subprocess.Popen") + @patch("teuthology.worker.symlink_worker_log") + @patch("time.sleep") + @patch("teuthology.worker.report.try_push_job_info") + def test_run_with_watchdog_with_reporting(self, m_tpji, m_sleep, 
m_symlink_log, m_popen): + config = { + "name": "the_name", + "job_id": "1", + "worker_log": "worker_log", + "archive_path": "archive/path", + "teuthology_branch": "jewel" + } + process = Mock() + process.poll.return_value = "not None" + m_proc = Mock() + m_proc.poll.return_value = "not None" + m_popen.return_value = m_proc + worker.run_with_watchdog(process, config) + m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"]) + + @patch("os.path.isdir") + @patch("teuthology.worker.fetch_teuthology") + @patch("teuthology.worker.teuth_config") + @patch("teuthology.worker.fetch_qa_suite") + def test_prep_job(self, m_fetch_qa_suite, + m_teuth_config, + m_fetch_teuthology, m_isdir): + config = dict( + name="the_name", + job_id="1", + ) + archive_dir = '/archive/dir' + log_file_path = '/worker/log' + m_fetch_teuthology.return_value = '/teuth/path' + m_fetch_qa_suite.return_value = '/suite/path' + m_isdir.return_value = True + m_teuth_config.teuthology_path = None + got_config, teuth_bin_path = worker.prep_job( + config, + log_file_path, + archive_dir, + ) + assert got_config['worker_log'] == log_file_path + assert got_config['archive_path'] == os.path.join( + archive_dir, + config['name'], + config['job_id'], + ) + assert got_config['teuthology_branch'] == 'main' + assert m_fetch_teuthology.called_once_with_args(branch='main') + assert teuth_bin_path == '/teuth/path/virtualenv/bin' + assert m_fetch_qa_suite.called_once_with_args(branch='main') + assert got_config['suite_path'] == '/suite/path' + + def build_fake_jobs(self, m_connection, m_job, job_bodies): + """ + Given patched copies of: + beanstalkc.Connection + beanstalkc.Job + And a list of basic job bodies, return a list of mocked Job objects + """ + # Make sure instantiating m_job returns a new object each time + m_job.side_effect = lambda **kwargs: Mock(spec=beanstalkc.Job) + jobs = [] + job_id = 0 + for job_body in job_bodies: + job_id += 1 + job = m_job(conn=m_connection, jid=job_id, body=job_body) + job.jid = job_id + job.body = job_body + jobs.append(job) + return jobs + + @patch("teuthology.worker.run_job") + @patch("teuthology.worker.prep_job") + @patch("beanstalkc.Job", autospec=True) + @patch("teuthology.worker.fetch_qa_suite") + @patch("teuthology.worker.fetch_teuthology") + @patch("teuthology.worker.beanstalk.watch_tube") + @patch("teuthology.worker.beanstalk.connect") + @patch("os.path.isdir", return_value=True) + @patch("teuthology.worker.setup_log_file") + def test_main_loop( + self, m_setup_log_file, m_isdir, m_connect, m_watch_tube, + m_fetch_teuthology, m_fetch_qa_suite, m_job, m_prep_job, m_run_job, + ): + m_connection = Mock() + jobs = self.build_fake_jobs( + m_connection, + m_job, + [ + 'foo: bar', + 'stop_worker: true', + ], + ) + m_connection.reserve.side_effect = jobs + m_connect.return_value = m_connection + m_prep_job.return_value = (dict(), '/bin/path') + worker.main(self.ctx) + # There should be one reserve call per item in the jobs list + expected_reserve_calls = [ + dict(timeout=60) for i in range(len(jobs)) + ] + got_reserve_calls = [ + call[1] for call in m_connection.reserve.call_args_list + ] + assert got_reserve_calls == expected_reserve_calls + for job in jobs: + job.bury.assert_called_once_with() + job.delete.assert_called_once_with() + + @patch("teuthology.worker.report.try_push_job_info") + @patch("teuthology.worker.run_job") + @patch("beanstalkc.Job", autospec=True) + @patch("teuthology.worker.fetch_qa_suite") + @patch("teuthology.worker.fetch_teuthology") + 
@patch("teuthology.worker.beanstalk.watch_tube") + @patch("teuthology.worker.beanstalk.connect") + @patch("os.path.isdir", return_value=True) + @patch("teuthology.worker.setup_log_file") + def test_main_loop_13925( + self, m_setup_log_file, m_isdir, m_connect, m_watch_tube, + m_fetch_teuthology, m_fetch_qa_suite, m_job, m_run_job, + m_try_push_job_info, + ): + m_connection = Mock() + jobs = self.build_fake_jobs( + m_connection, + m_job, + [ + 'name: name', + 'name: name\nstop_worker: true', + ], + ) + m_connection.reserve.side_effect = jobs + m_connect.return_value = m_connection + m_fetch_qa_suite.side_effect = [ + '/suite/path', + MaxWhileTries(), + MaxWhileTries(), + ] + worker.main(self.ctx) + assert len(m_run_job.call_args_list) == 0 + assert len(m_try_push_job_info.call_args_list) == len(jobs) + for i in range(len(jobs)): + push_call = m_try_push_job_info.call_args_list[i] + assert push_call[0][1]['status'] == 'dead' diff --git a/teuthology/timer.py b/teuthology/timer.py new file mode 100644 index 0000000000..0119b8b3a3 --- /dev/null +++ b/teuthology/timer.py @@ -0,0 +1,114 @@ +import logging +import time +import yaml + +from datetime import datetime + +log = logging.getLogger(__name__) + + +class Timer(object): + """ + A class that records timing data. + + It was created in order to record time intervals between the execution of + different tasks' enter and exit methods. + """ + # How many decimal places to use for time intervals + precision = 3 + # The format to use for date-time strings + datetime_format = '%Y-%m-%d_%H:%M:%S' + + def __init__(self, path=None, sync=False): + """ + :param path: A path to a file to be written when self.write() is + called. The file will contain self.data in yaml + format. + :param sync: Whether or not to call self.write() from within + self.mark() + """ + if sync and not path: + raise ValueError( + "When providing sync=True, a path must be specified!") + self.path = path + self.sync = sync + self.marks = list() + self.start_time = None + self.start_string = None + + def mark(self, message=''): + """ + Create a time mark + + If necessary, call self._mark_start() to begin time-keeping. Then, + create a new entry in self.marks with the message provided, along with + the time elapsed in seconds since time-keeping began. + """ + if self.start_time is None: + self._mark_start(message) + interval = round(time.time() - self.start_time, self.precision) + mark = dict( + interval=interval, + message=message, + ) + self.marks.append(mark) + if self.sync: + self.write() + + def _mark_start(self, message): + """ + Create the initial time mark + """ + self.start_time = time.time() + self.start_string = self.get_datetime_string(self.start_time) + + def get_datetime_string(self, time): + """ + Return a human-readable timestamp in UTC + + :param time: Time in seconds; like from time.time() + """ + _datetime = datetime.utcfromtimestamp(time) + return datetime.strftime( + _datetime, + self.datetime_format, + ) + + @property + def data(self): + """ + Return an object similar to:: + + {'start': '2016-02-02_23:19:51', + 'elapsed': 10.65, + 'end': '2016-02-02_23:20:01', + 'marks': [ + {'message': 'event 1', 'interval': 0.0}, + {'message': 'event 2', 'interval': 8.58}, + {'message': 'event 3', 'interval': 10.65} + ], + } + + 'start' and 'end' times are in UTC. 
+ """ + if not self.start_string: + return dict() + if len(self.marks) <= 1: + end_interval = 0 + else: + end_interval = self.marks[-1]['interval'] + end_time = self.start_time + end_interval + result = dict( + start=self.start_string, + marks=self.marks, + end=self.get_datetime_string(end_time), + elapsed=end_interval, + ) + return result + + def write(self): + try: + with open(self.path, 'w') as f: + yaml.safe_dump(self.data, f, default_flow_style=False) + except Exception: + log.exception("Failed to write timing.yaml !") diff --git a/teuthology/util/__init__.py b/teuthology/util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/util/compat.py b/teuthology/util/compat.py new file mode 100644 index 0000000000..fc654e3d69 --- /dev/null +++ b/teuthology/util/compat.py @@ -0,0 +1,16 @@ +import sys + +PY3 = False + +if sys.version_info >= (3, 0): + PY3 = True + +if PY3: + from urllib.parse import parse_qs, urljoin, urlparse, urlencode # noqa: F401 + from urllib.request import urlopen, Request # noqa: F401 + from urllib.error import HTTPError # noqa: F401 +else: + from urlparse import parse_qs, urljoin, urlparse # noqa: F401 + from urllib import urlencode # noqa: F401 + from urllib2 import urlopen, Request, HTTPError # noqa: F401 + diff --git a/teuthology/util/flock.py b/teuthology/util/flock.py new file mode 100644 index 0000000000..f381d8b51b --- /dev/null +++ b/teuthology/util/flock.py @@ -0,0 +1,22 @@ +import fcntl + + +class FileLock(object): + def __init__(self, filename, noop=False): + self.filename = filename + self.file = None + self.noop = noop + + def __enter__(self): + if not self.noop: + assert self.file is None + self.file = open(self.filename, 'w') + fcntl.lockf(self.file, fcntl.LOCK_EX) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.noop: + assert self.file is not None + fcntl.lockf(self.file, fcntl.LOCK_UN) + self.file.close() + self.file = None diff --git a/teuthology/worker.py b/teuthology/worker.py new file mode 100644 index 0000000000..b99fab33d7 --- /dev/null +++ b/teuthology/worker.py @@ -0,0 +1,357 @@ +import logging +import os +import subprocess +import sys +import tempfile +import time +import yaml + +from datetime import datetime + +from teuthology import setup_log_file, install_except_hook +from teuthology import beanstalk +from teuthology import report +from teuthology import safepath +from teuthology.config import config as teuth_config +from teuthology.config import set_config_attr +from teuthology.exceptions import BranchNotFoundError, CommitNotFoundError, SkipJob, MaxWhileTries +from teuthology.kill import kill_job +from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology, ls_remote, build_git_url + +log = logging.getLogger(__name__) +start_time = datetime.utcnow() +restart_file_path = '/tmp/teuthology-restart-workers' +stop_file_path = '/tmp/teuthology-stop-workers' + + +def sentinel(path): + if not os.path.exists(path): + return False + file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path)) + if file_mtime > start_time: + return True + else: + return False + + +def restart(): + log.info('Restarting...') + args = sys.argv[:] + args.insert(0, sys.executable) + os.execv(sys.executable, args) + + +def stop(): + log.info('Stopping...') + sys.exit(0) + + +def load_config(ctx=None): + teuth_config.load() + if ctx is not None: + if not os.path.isdir(ctx.archive_dir): + sys.exit("{prog}: archive directory must exist: {path}".format( + prog=os.path.basename(sys.argv[0]), + 
path=ctx.archive_dir, + )) + else: + teuth_config.archive_base = ctx.archive_dir + + +def main(ctx): + loglevel = logging.INFO + if ctx.verbose: + loglevel = logging.DEBUG + log.setLevel(loglevel) + + log_file_path = os.path.join(ctx.log_dir, 'worker.{tube}.{pid}'.format( + pid=os.getpid(), tube=ctx.tube,)) + setup_log_file(log_file_path) + + install_except_hook() + + load_config(ctx=ctx) + + set_config_attr(ctx) + + connection = beanstalk.connect() + beanstalk.watch_tube(connection, ctx.tube) + result_proc = None + + if teuth_config.teuthology_path is None: + fetch_teuthology('main') + fetch_qa_suite('main') + + keep_running = True + while keep_running: + # Check to see if we have a teuthology-results process hanging around + # and if so, read its return code so that it can exit. + if result_proc is not None and result_proc.poll() is not None: + log.debug("teuthology-results exited with code: %s", + result_proc.returncode) + result_proc = None + + if sentinel(restart_file_path): + restart() + elif sentinel(stop_file_path): + stop() + + load_config() + + job = connection.reserve(timeout=60) + if job is None: + continue + + # bury the job so it won't be re-run if it fails + job.bury() + job_id = job.jid + log.info('Reserved job %d', job_id) + log.info('Config is: %s', job.body) + job_config = yaml.safe_load(job.body) + job_config['job_id'] = str(job_id) + + if job_config.get('stop_worker'): + keep_running = False + + try: + job_config, teuth_bin_path = prep_job( + job_config, + log_file_path, + ctx.archive_dir, + ) + run_job( + job_config, + teuth_bin_path, + ctx.archive_dir, + ctx.verbose, + ) + except SkipJob: + continue + + # This try/except block is to keep the worker from dying when + # beanstalkc throws a SocketError + try: + job.delete() + except Exception: + log.exception("Saw exception while trying to delete job") + + +def prep_job(job_config, log_file_path, archive_dir): + job_id = job_config['job_id'] + safe_archive = safepath.munge(job_config['name']) + job_config['worker_log'] = log_file_path + archive_path_full = os.path.join( + archive_dir, safe_archive, str(job_id)) + job_config['archive_path'] = archive_path_full + + # If the teuthology branch was not specified, default to main and + # store that value. + teuthology_branch = job_config.get('teuthology_branch', 'main') + job_config['teuthology_branch'] = teuthology_branch + teuthology_sha1 = job_config.get('teuthology_sha1') + if not teuthology_sha1: + repo_url = build_git_url('teuthology', 'ceph') + teuthology_sha1 = ls_remote(repo_url, teuthology_branch) + if not teuthology_sha1: + reason = "Teuthology branch {} not found; marking job as dead".format(teuthology_branch) + log.error(reason) + report.try_push_job_info( + job_config, + dict(status='dead', failure_reason=reason) + ) + raise SkipJob() + if teuth_config.teuthology_path is None: + log.info('Using teuthology sha1 %s', teuthology_sha1) + + try: + if teuth_config.teuthology_path is not None: + teuth_path = teuth_config.teuthology_path + else: + teuth_path = fetch_teuthology(branch=teuthology_branch, + commit=teuthology_sha1) + # For the teuthology tasks, we look for suite_branch, and if we + # don't get that, we look for branch, and fall back to 'main'. + # last-in-suite jobs don't have suite_branch or branch set. 
+ ceph_branch = job_config.get('branch', 'main') + suite_branch = job_config.get('suite_branch', ceph_branch) + suite_sha1 = job_config.get('suite_sha1') + suite_repo = job_config.get('suite_repo') + if suite_repo: + teuth_config.ceph_qa_suite_git_url = suite_repo + job_config['suite_path'] = os.path.normpath(os.path.join( + fetch_qa_suite(suite_branch, suite_sha1), + job_config.get('suite_relpath', ''), + )) + except (BranchNotFoundError, CommitNotFoundError) as exc: + log.exception("Requested version not found; marking job as dead") + report.try_push_job_info( + job_config, + dict(status='dead', failure_reason=str(exc)) + ) + raise SkipJob() + except MaxWhileTries as exc: + log.exception("Failed to fetch or bootstrap; marking job as dead") + report.try_push_job_info( + job_config, + dict(status='dead', failure_reason=str(exc)) + ) + raise SkipJob() + + teuth_bin_path = os.path.join(teuth_path, 'virtualenv', 'bin') + if not os.path.isdir(teuth_bin_path): + raise RuntimeError("teuthology branch %s at %s not bootstrapped!" % + (teuthology_branch, teuth_bin_path)) + return job_config, teuth_bin_path + + +def run_job(job_config, teuth_bin_path, archive_dir, verbose): + safe_archive = safepath.munge(job_config['name']) + if job_config.get('first_in_suite') or job_config.get('last_in_suite'): + if teuth_config.results_server: + try: + report.try_delete_jobs(job_config['name'], job_config['job_id']) + except Exception as e: + log.warning("Unable to delete job %s, exception occurred: %s", + job_config['job_id'], e) + suite_archive_dir = os.path.join(archive_dir, safe_archive) + safepath.makedirs('/', suite_archive_dir) + args = [ + os.path.join(teuth_bin_path, 'teuthology-results'), + '--archive-dir', suite_archive_dir, + '--name', job_config['name'], + ] + if job_config.get('first_in_suite'): + log.info('Generating memo for %s', job_config['name']) + if job_config.get('seed'): + args.extend(['--seed', job_config['seed']]) + if job_config.get('subset'): + args.extend(['--subset', job_config['subset']]) + if job_config.get('no_nested_subset'): + args.extend(['--no-nested-subset']) + else: + log.info('Generating results for %s', job_config['name']) + timeout = job_config.get('results_timeout', + teuth_config.results_timeout) + args.extend(['--timeout', str(timeout)]) + if job_config.get('email'): + args.extend(['--email', job_config['email']]) + # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to + # make sure that it will continue to run if this worker process + # dies (e.g. because of a restart) + result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp) + log.info("teuthology-results PID: %s", result_proc.pid) + return + + log.info('Creating archive dir %s', job_config['archive_path']) + safepath.makedirs('/', job_config['archive_path']) + log.info('Running job %s', job_config['job_id']) + + suite_path = job_config['suite_path'] + arg = [ + os.path.join(teuth_bin_path, 'teuthology'), + ] + # The following is for compatibility with older schedulers, from before we + # started merging the contents of job_config['config'] into job_config + # itself. 
+ if 'config' in job_config: + inner_config = job_config.pop('config') + if not isinstance(inner_config, dict): + log.warning("run_job: job_config['config'] isn't a dict, it's a %s", + str(type(inner_config))) + else: + job_config.update(inner_config) + + if verbose or job_config['verbose']: + arg.append('-v') + + arg.extend([ + '--lock', + '--block', + '--owner', job_config['owner'], + '--archive', job_config['archive_path'], + '--name', job_config['name'], + ]) + if job_config['description'] is not None: + arg.extend(['--description', job_config['description']]) + arg.append('--') + + with tempfile.NamedTemporaryFile(prefix='teuthology-worker.', + suffix='.tmp', mode='w+t') as tmp: + yaml.safe_dump(data=job_config, stream=tmp) + tmp.flush() + arg.append(tmp.name) + env = os.environ.copy() + python_path = env.get('PYTHONPATH', '') + python_path = ':'.join([suite_path, python_path]).strip(':') + env['PYTHONPATH'] = python_path + log.debug("Running: %s" % ' '.join(arg)) + p = subprocess.Popen(args=arg, env=env) + log.info("Job archive: %s", job_config['archive_path']) + log.info("Job PID: %s", str(p.pid)) + + if teuth_config.results_server: + log.info("Running with watchdog") + try: + run_with_watchdog(p, job_config) + except Exception: + log.exception("run_with_watchdog had an unhandled exception") + raise + else: + log.info("Running without watchdog") + # This sleep() is to give the child time to start up and create the + # archive dir. + time.sleep(5) + symlink_worker_log(job_config['worker_log'], + job_config['archive_path']) + p.wait() + + if p.returncode != 0: + log.error('Child exited with code %d', p.returncode) + else: + log.info('Success!') + + +def run_with_watchdog(process, job_config): + job_start_time = datetime.utcnow() + + # Only push the information that's relevant to the watchdog, to save db + # load + job_info = dict( + name=job_config['name'], + job_id=job_config['job_id'], + ) + + # Sleep once outside of the loop to avoid double-posting jobs + time.sleep(teuth_config.watchdog_interval) + symlink_worker_log(job_config['worker_log'], job_config['archive_path']) + while process.poll() is None: + # Kill jobs that have been running longer than the global max + run_time = datetime.utcnow() - job_start_time + total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds + if total_seconds > teuth_config.max_job_time: + log.warning("Job ran longer than {max}s. Killing...".format( + max=teuth_config.max_job_time)) + kill_job(job_info['name'], job_info['job_id'], + teuth_config.archive_base, job_config['owner']) + + # calling this without a status just updates the jobs updated time + report.try_push_job_info(job_info) + time.sleep(teuth_config.watchdog_interval) + + # we no longer support testing theses old branches + assert(job_config.get('teuthology_branch') not in ('argonaut', 'bobtail', + 'cuttlefish', 'dumpling')) + + # Let's make sure that paddles knows the job is finished. We don't know + # the status, but if it was a pass or fail it will have already been + # reported to paddles. In that case paddles ignores the 'dead' status. + # If the job was killed, paddles will use the 'dead' status. 
+ report.try_push_job_info(job_info, dict(status='dead')) + + +def symlink_worker_log(worker_log_path, archive_dir): + try: + log.debug("Worker log: %s", worker_log_path) + os.symlink(worker_log_path, os.path.join(archive_dir, 'worker.log')) + except Exception: + log.exception("Failed to symlink worker log") diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..9193865d07 --- /dev/null +++ b/tox.ini @@ -0,0 +1,61 @@ +[tox] +envlist = docs, py3, flake8, openstack +isolated_build = True + +[testenv] +setenv = + LC_ALL=en_US.UTF-8 + LANG=en_US + +[testenv:py3] +install_command = pip install --upgrade {opts} {packages} +passenv = HOME +deps= + -r{toxinidir}/requirements.txt + pytest-cov==2.8.1 + coverage==4.5.4 + mock==4.0.3 +extras = test +log_format = %(asctime)s %(levelname)s %(message)s +commands= + python -m pytest --cov=teuthology --cov-report=term -v {posargs:teuthology scripts} + +[testenv:flake8] +install_command = pip install --upgrade {opts} {packages} +deps= + flake8 +commands=flake8 --select=F,E9 {posargs:teuthology scripts} + +[testenv:docs] +install_command = pip install --upgrade {opts} {packages} +changedir=docs +deps= + -r{toxinidir}/requirements.txt + sphinx + sphinxcontrib-programoutput +commands= + sphinx-apidoc -f -o . ../teuthology ../teuthology/test ../teuthology/orchestra/test ../teuthology/task/test + sphinx-build -b html -d {envtmpdir}/doctrees . {envtmpdir}/html + +[testenv:openstack] +install_command = pip install --upgrade {opts} {packages} +passenv = HOME OS_REGION_NAME OS_AUTH_URL OS_TENANT_ID OS_TENANT_NAME OS_PASSWORD OS_USERNAME +deps= + -r{toxinidir}/requirements.txt +extras = test +commands=py.test -v {posargs:teuthology/openstack/test/test_openstack.py} + +[testenv:openstack-integration] +passenv = HOME OS_REGION_NAME OS_AUTH_URL OS_TENANT_ID OS_TENANT_NAME OS_PASSWORD OS_USERNAME +deps= + -r{toxinidir}/requirements.txt +extras = test +commands= + py.test -v {posargs} teuthology/openstack/test/openstack-integration.py + +[testenv:openstack-delegate] +passenv = HOME OS_REGION_NAME OS_AUTH_URL OS_TENANT_ID OS_TENANT_NAME OS_PASSWORD OS_USERNAME +sitepackages=True +deps= + -r{toxinidir}/requirements.txt +commands={toxinidir}/openstack-delegate.sh diff --git a/update-requirements.sh b/update-requirements.sh new file mode 100755 index 0000000000..a2b56ba8dc --- /dev/null +++ b/update-requirements.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +pip-compile --extra=test $@ pyproject.toml diff --git a/watch-suite.sh b/watch-suite.sh new file mode 100755 index 0000000000..04a5d34ee0 --- /dev/null +++ b/watch-suite.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +watch "pwd ; echo \`teuthology-ls . | grep -c pass\` passes ; teuthology-ls . | grep -v pass" + -- 2.39.5
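The teuthology/timer.py module added above records named marks relative to the first call and, with sync=True, rewrites its YAML file on every mark. A minimal usage sketch, assuming teuthology is importable and the output path (/tmp/timing.yaml is only an example) is writable:

    import time

    from teuthology.timer import Timer

    timer = Timer(path='/tmp/timing.yaml', sync=True)  # sync=True rewrites the file on every mark()
    timer.mark('task enter')      # the first mark also records the start time
    time.sleep(1)                 # stand-in for real work
    timer.mark('task exit')

    # timer.data holds 'start', 'end', 'elapsed' and the list of marks; with
    # sync=True the same structure has already been dumped to the given path.
    print(timer.data['elapsed'])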

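teuthology/util/flock.py wraps fcntl.lockf in a context manager. A short sketch of the intended use, assuming the lock file path (/tmp/teuthology.lock below is only an example) is writable by every cooperating process:

    from teuthology.util.flock import FileLock

    # Exclusive advisory lock held for the duration of the with-block; a second
    # process entering the same block on the same path waits in fcntl.lockf().
    with FileLock('/tmp/teuthology.lock'):
        pass  # critical section: mutate shared state here

    # noop=True keeps the call site unchanged but skips locking entirely,
    # which can be convenient for single-process runs or tests.
    with FileLock('/tmp/teuthology.lock', noop=True):
        pass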
zOY_(pcK{CGmCJ!QfBqi(iAiyVq{lK7Xxe z^6gwlQSYv_16OuRR3EDBh*x@e=L7{!&CrzY@ei6(BW_2KqRJ`%epfc3KTcX8>n zRoVLw?ex}vvLdg)gkrd!p>*4DZ#tX#@Wt*!`No#iT60t9)bY{M2M=|1bx$50=$W7B zjUq8n>#X5!>9^Vwx>^0TJJg7xt($K=zp?J3D7=XFnPY?f9qkQ0jXlU|OJR(F>AH1t z1RCnZFs}IYu`VR69aqV41no{$yh*CGarF_5g_1V$G*4T_L1<(!MlP@-fZE{snt-r` z%XOQCGN^FnC1rpCoq--a8C+oVvF;f`=Z3Pap@%1%WHRI-tPVmN?P2*N`X-|IIuR;^H^g?f4Xg;tEH>G zWfmc!r8z<1(SdEJ3Xhoc!x#>g|-k!K=CQYBv>w9O94-KA}?fqR|M0tZ@{OSMa=MBfq z4$hlFt7~!IG0eZ3R^LHqdGPha<0C`t5ZBZgZ%ecpAB@e4Sts?60mBQ35r>1W7eq7A zk)48p&KFE#M#KqtMM379jFK3m(OgO#YC=NWZ?ufoNl+Fja_X55<9SAmEIeDj9WW-} zAyOMm%@iAxn0y_5A^x7KTp&9KX?631+M#=ZdlL=Z&&S#x^}P93zpyjxqfcXddXISg zRokRP$PxOc+sA0z+R=sL%rtsJ_I3_+4f%Gd&Eq|eM3eZkLJ2H&lz1Xab)?*`>p&ZM zX#@%O2K-BS+DLG6)k*+L43DG0o7~j`MB!1A%c?9!gL|iZ02PK|O=xgrklmWCh+-sB zH#*@i_^|4jf;)z3Q~VBOe+RlVi+6PlzudMmr>fiN#T}t!m96)QZN*W@x%&01-wl09 z?X3E2YD}`BaCxQ5U=%zEh2Zcyd%-4jt7B3NJOPf=tgCV<*9ZIukp~rK+xZYG z!ELw)BM_0FgDe_6hb$UAfovCazyn$`;->-f!;APaij$F9=xp)!Af)lJ#pTnP9L4SC zC+E=Nuk_q}2KhesK#v*v7yLaS{Q}c2L9RZ^S`Y8FhCv$A>p?+Lsjg)OL zZz77kiKvJ?<{s*I@WW5Jma|+&zF^q43pj;9hzi&Bq7T4V(uth;A^!3O}1qHpE0{+5*1gP`Z%&Y4IkbYzW5> zj{+O`F)anxP`)(KC(t|zHq2+BcaW3EL;_I3)xi1TXG_eRMP&mY#Qd(iSPW*Sm$4^| z>}^MHbocZwdm(k0rB5=dx6%pr@AX|(vAxYVShfJZfIhU|yrgO8h>HrE>8XiBZ00ji ztAd6etP2GdOALv9L*@nUG%h`BY{&qT7|{UnDp~yx-eCmGaO0dIlY(X_eqn@%bPTQ< zBo5b$4FntF;BI~31O%ws$B=9d0;P-O#u`8|bsW(RP-iXRXgLJ`P99NrlZ&nEMX^~b z#eTHxCJi~oE>S;@Fz;KTP7w-;wMv!>VqayU4u=#*I>IAFYClcGUDKiBNwHZ z!z7yyWTk8|_p0c(BE53?Z0#vnr9WeL($YQZiHZ43=Sn|qZ1Zo4Ok*w1`?jdXIfU{W zT)IMZSmd*93k$u_CWc5kj>^C=d#U+_$@b&misE0jWrYeFwy1Xod3wStIo1HZ?+&hbJp5e58QXs|+d^ zfL2tMIYUesXSc;hRX&viD>jl_sLR^Rr%8| ziw#j{2)wseVUtxzcl*};R3-_A)Kf~gBit$j{?E5Zj4Cy@^h4Vrg~-0O(3>y|pdh{7 zx@N0;r;N(>+1p8EV0B6Ohx%4xB1BI^dz+gutx9)OmF7>-z|Jntzs`ENmTx06N3F#OVZc4Ba8x8(7cPcqc!6U&L{&kLm9iT#q3PXwQp`a}5XI-EJcY#`-1pybX7s8xR|Wd`L2x zD+U~(9_xEhqkp*EvS42FhCk)t(mD0qgU1W@mnrKezGJ)qLGBzc5K7c|VGNR8;sw{^ zLitX9>=iF6!R!_<%v~0Wi-Q|07<2wS;Tn1?R!&zzu0ly=fJx0Jbs14e>11c8G-jRN1+pUx;1D(GN z;{s}l;T>Wg)LM;K!g0oKXN(r3U3HZYbFF;K;32vpH*3Jc2v>uPlmU_KX${H8mJLc0 zyg#nLw?xZWUd>(v1cai7NE~i)4XCdS7`hS092C-29&Y_9usA3V^A=A*`l>Foh+HG$k9 z;FAt-#dSiv!KbnRdud+=vDuK3H~3guJa zCsbT9o11ZqGFYCL%@4o8_Z%Q3^cM{wxDrjUF)o|uK_&wZTz2IU>3KDq3j(yvXCGT~ z4{S3&4%JbVYcMDEUaK~Gn&Nmlc2(=atrY?-92&-KrLTInP``T6h02TGRQ;Wp&)glr zGh3ZH($@uzS35I;NXDrcz_Mtxk#ldgt}^C5%h49NhKO!{Pk)BL(npQinok2uC@2b4!6OTM{x%AkDmwdWwuD`}x8b&4zp)4%11o^+G5k)AI})F9=D2=KlKF%Rrh z5_9`a7d?-S?6b2(F~SeWFC9B4F@^>nR=!o^OV#gC(`NdarS&>4f&Pk}58*d@v|0y# zg+|HL&JH7jrXWu6Yw2x9H+ij^d6mb zimKz#va`@WxiL31+t;*mdTwJ91`}pIVXo;7!duo|IJ)#iKtVrRGA-CXX_MWoWu7VO zxjb6uU*i{efdB6diw8kwGBQ5U9#fqj!*d=ow87i|HQddsx18~Wd%-!27Ho2+1?dMN zafMY-f*^m92W!k;F3^}?FBMsxtI98SD)WZu@rc>}bZUcHog?GoDsxl1Cf}TpSohAn_Wd{1{u!A9Z*YVMPW)(SHE6)_^FhsCVv3 zyB57b!wCsR)`EhSan8lf2S5;%o_tGM8O{Rz;uuL?^hVka$LS~zxcsVS-agvUarSIS z!*hSfLrrfJ?K_l{SuIC zN^t#rotdg}f2OJyPfg?t#7fp$brK z=n;7AW+o1e4COnCMz(h?i}<6Hum{H=o#Az}!JsDk+B`-jcm~?>l7wLUu2d|8h85Q7 z_kruG#mr(vNY2`3k0D*uUD*||BS+_(t?*Vqzopl8>SV6*;w{TJw6~oOZ?flZQKV$>iev;p3CXhX!&JofFB9Bwc?V zrc0F~En=5*h*cIYHq0Jy5Lyd-cOa$%+XM)YcA7l7BqIL}w1Du3MkE(_*`<`g=PKbO zQCXV=I@2PyBxE51N`j=34}c?Bzl!UU1_-Se`Z9fF++G2wQyXkVHd+^~wRsP2pY8$g zkbIQtY&qdmT*q_6g>5PSb&a{BM{|wOIQFW=!|B|Kv&%6<@aD6U@F$zn$I}<@Qys;z z?umvM-S-|t@8+`x-IJ$=&p}71$;&}+JK^OSli@Do=fTU@p7+RQPC783>J`C1;ctoO zl0W00ysXz%0(b3 z{-{-yB{bYG;NQ2a7ndGg=a}bX>ea_&*t3KAx&O;aJO6iRG&C3Pay!Sy#^%Q6W@o0S z4o{4OEA@7_wWJc_Q5Xb8tYEOWb7ZnG&=gICS1OTT1AnF4^FOqTqnwK@9K^PbwzBY@P(0n5Z3f9?2hxfaev4h)-aGb5a zmgqKAFmXUNzn?#j;`ml@pa&&5xkDTp)RNHj$3G>sbfO1G_|^`gpvwuk6^>ZkbP_6{ 
zG{c?YKNQ-=nz{^tG5^-$Ww#dCR$KtWzC#i}GM<lZl8Ne{mtOZJW zV5;pF>_pU7kDvS$n;UOXtyy=Tzb*~$!*(uxL#PG(^p$QZ;|=Su9PMy}H`$2P(1<<2 z@HhLYU~$oIcdq*BZFpW(_PA77$}Q#Kn=HiR-hMyw!WYh%f3CN@@JG#G;deuZFJitp z7^*(!+jpu9!gvLi^FW~uit^6^1pFJZ#qbMJ7HQmwR4ld)wq16tUZ_HIkV6Avt1kQ+ z1p#ad_fP}*yUM~w4WVAlQAk-XgDIp6Uns%PvuaLVExk4Cyn6G^S@lp! zskf>JZrf~HZ`!;~JwWna!@DMGyo>k2*JSv@G#=zX6hiuo@k8FF)WYb};KE>GbU}qD z@Pdc3n{Reroh`jpUB$ekw|g%uy&ZDMoTbKz59|^rD$v+C5)V})2BjnLy&SJ$BJTx< z1c?eh7IAfJweH z+uqjN)QCD+J>iRyEXe<#piY&>+#IqMD` zp2&41lMBU(V~3BSPs4a_Jef@bcgy?`4ZxTah-rI%2+RX1^Ct(L2G;ce_KtbQL`!55 zCIx{H0SuxQ7EIIu(*n~s{=1#RAt4;w2^_>;l5~vp1e_R&roh~h3LtQq;K^#2pxsy~ zF5ig_>9xOwA3pY8)bMBJRdP?b1Ei{aJivfrD{{E(imrL!=P! zz0oCFFg{W^hZo>r_%E@>zP`rT?10KaZprLnRVq(4zd2jO*6AQiYUV6Hr(*c~HK}Am={$?fw zf57D70(xkom96mUmL>#nm3Z9xfm$2L1yd-raah+gj6AqV76_2UC@!4>ARb4$0-8D+ z8xR!+P}FW=*n4$ZH!<~0H8?!Yl{6?(r&|X$aLnWC@ZvEmf!6T#)xe;IkcWb0*9Tj3 zj)Jv_pS~5)P^eKgM36EHk>IDlrJ!`Aq)C{ceNgiy7K;yd=k7J#vtTxDkcHxL1O+{l z7!70xVUZwi8fa(&yoQ;)u;xblj^24{^p3&a>lbHEPUIpgmn%K>cey`^zb@$ol#+sYgQmuWF+t-{`N-fEPC9)k%y0uhE|qIs?3&l=*?KdPMiMUtKAT_T=Mk z!e!P6UE4Nm=qPitj=>r`KIlZl0|<9TSPq51MCu<#AFk?B$nh&VF(m z2q9=zI&To3NC}{^u3E$4hwtD#14vHAUML`B422zwv<)=t)jWWGxd*c&4=EJ#R^XQm zd+J45H*Yd#9e^$PlSSaqR9}DH<=4$U_~46|&lXxbr_W3-J#DSO^Y{yH$*-==rPJfZ z#>%`WY^^C(Abf_wpgNfdVPBE{PFQ@^XZN@C>~_iw%GNk? zW)b9*tl2UgS|A26dzJZJVIO@Y1(k*-F$UJ2o>Gp7f^+!hwpbh`Kx8jcE_h%tgT$C| zYg?>DR|bS|*r};}?5hXR$2pfc=kL7?-DHfyHh?U;?Qb{kzV~5RkB4O#&I7+-7IV_o z*E9;1-lpDLda3$$=`Qsv8a%8+*sH=ECeA+R@d4HF673hLDH_(^M z8lGV*|K+PL;%b(9oJg!27Z3LsN^jV=nY;rY0VFcukIaNeblmi~XQu}0gS6w9czJbT zFfkNOT*Q`V?+nIR2o~;KlOE6Gs$6c_7F!$#KuBqDMW=l$!cGH_Q@M{I&)ESmiv#=L zm3tujslbJBd!JLrZS*)Jv2Dt@4YLl^yEMA|b+U76WUH|OnZxH6hOZy&U7s7Bfn(=! zVK}dC>%vs}Xl`VSSpnS(6M5<1)|49n-c12@n77AXAiugTuUY2B2k6zy&o^ji`2=j5 zn(U@ndp!&q%PVcQMi{9xi>mz4_Qw1vtK>J&zB(8BPLI6a#2C|C;a5L9pp zs>6I^2_NGh=&lK2f^aoIJ0mo7kix_XzaQGb|I|(DFP{plua$-fA2ze|2H;g|Xi(kb zcC@u2(8H0knDWUSAHK(GL4`z|&43YX!AoH-nkf@tWTw1^LnchrW2F3XTft-31tw)p z6Sa{418!EaDk>#j_61VEBUz$!n=b~Ip(6}r<8L{2=VC6mc;~6NY<%JAuRhn;ckb0s|H2jZvrDBvpT6lO zr%%1~rs>iX>e9-y-g5c!Tb{L|o(B~~h3*c0RDD1_2_ruCF@!Q24lnt!p!nSnJv3wf z`784S{zmf@=f`9&TBC}y-&12lGEp^|d>{o+Ev!ijFVm|D$c4#hxRgYo-BXR{fRoN7 zAC_@*UaoMB=LA`i6YLDxs~wWZkf|Sdt!WX3-a~lQUSr=j5ZxWXGFPOKL771uK^2+4 zei`*snX{$u-1gP4-o{yGo13NIhd}-H(!=Uiz%ra6;)TvE=%*2A_Y-b&Z?_apAy5WN zF?lfqz*3~iz!E2FMx|ziXHi_4J2E?yZAU0@5?Hys+gGB0M%9_FxisKD2AjkGdjL%)g1JX)EE27LD}7U~sow0G;FYdxc^qG*L0#vz(+wGEh=4x9;iP`ocK;eqMRDB-rvtyh^}HEC z$nupetZ=w7Y&r5!<)eIx>G(7rBrTMvNJr$9_oN{{n3sH3OBjKTHMmU?F+zxB$Fp1& zAXzC1h&Y`8CMpN=ccA6ULVjrbf?|4?uiO2L40$t2+7zbz1Bm)FnEW!0`fC-7@Ir}^LH1X#=5DlE$Pd#?FBWg}_1od0 zaN8nWfF`|rtN>2&g`xJ|<42|z*lc)VYT;=E=ZDuW!-$@~yf%7z_^kRwLqqA~t*xfL zu$nG?Bin8gz_ab8Z-``p-s-$0{MW$QSg2XS(;`-KbG*5UMzZZLC_2U*&T<^Jn^7`- z)S{z_5v4gjpujU)smQNmF&2g$2u}0xC%pxzHpMBR=TVJ?!8OG0rSdiWH5)w#mj-kx z@D1X5pq%_z#d-vl(5wK3e31wSaRxR}3yQUa_Ph&sfA6=nK+6~H;0uH3MNm|~tJ0;X zzWbU#IaB)jnM*1;hj#7nJ$Z8T0riT_SN+i+Ip5nv!?3V7SL5&T6K0l$u5L5C>Qs&S zP-cb~gW_O1IzPl$CLtcEOxn_69G4K;#-2dcm{{i48d_yG1?gOGMpIO8?AG8~6Xx$&;m9ch{?5)7m$|ui8UB$U;Z!e?%5~vZ#b)C@hm^{tooJHq|Em zjY)4a?FyLysMi7cK&wK$6FR=|99_H&s*A_b-ZW)_RZ#hDXh zZJS@LMZc731EEvS53%Rdsr8j5PI4%W_I6ibh-7(9U@Fcyrs53rV=B%<*;E{sdodpb z2m=hFN6WSo5|37mxi}a046bT+hskz1w=(rNYN&vjQvLs5jR z>>ex#Z?$`--fLwc95m4H*;vwiiJH}>X8vLkDU4NBoLb%ZlkdJp4F$_NZ$EAHj#hH4 zU~%UiGye_x2VRO3M_&nNK{U&8^r%LH@z@A$?i3Q+AP10TYIO2O_`e35px6x|8xNUg z;8CzcM9M)c(~3Egm>6oKc;QHD)TFI$u}tfsA{!+#C$dQv6NXnd2>U{L#$$FmOjofj zP?HHMm?MGW%pWiPUhkQg+=3ufetP;y&+%L4bGi9jj`tjyp3e8*edd;zoav>zp>&*e zI0!hUT2hfCPkZ_4(=UJ8kw~h!;dFCWs&X9Zcllx1D9GLo{UOg|XrQ}`xL9!>OZ;NJ 
zJdZv(4>tV--}bCUs9)ZZ2-}AD*B*%uM9kG)?wtzondK8$=~5LZ!`a0U1;7`+WU>nm}p^=R~QsI$SRK<Uz?8pi*}%Ya3_R>An$gh1ZrTd>&HP$vCttM+Z!FGAG`)0AgrLT zae#O*D0n~=0MVK5X+A1|q|6-#yBQ(2w-t;4B6uAb!T(UW8;I>yEf)eas2BjSGfWD6 z7joMDVNw||u-+Z-%EzQK5WC!u(W5I-sU{P7zQ&~6w&NqfqAfUy;UiyzAN~ks6Z(r! zjt+NsrCW992wyg}gBOoy!Hb~{N)|Lo8^}Y^Vi~a~n+zkyu%p#2merO`mYqt)v?~lP*e1&sn7(bA9v-d&S;Tec9dr{=E*A{ zH^q`n-?XPxM_e7LSdACchveC{BBgNT79gogA8+7|((Bl}mB8{^(kdowD|o(^chy8V z)#4rRV_Tw}Z4OfUp)CZM!MksQZ|y*Sg;a(7621k|1>UtI`L#6wkLv)Fx0PQZr(!2i zNBk@%?q(ZcLHV`MUFnR1{qDz;UnaR^YZ`N!ya@1%wZ1aHohUPQC1+!P*oU<+laxXtC4=cZHu+CL`kc~?Fr1wib2KSq94 zqEc;k@^KH7erHQ|qH|9gw_qi>wku%L_exG%CN!Xa!0l{@KHNUgj+jTLEh)2S%Cg#~ zprfL0#NHeefh2vA1_b;;IxiA=8NqC}i5iPgTg1kVFdyvlK$-Y_jTK(M5U9%Gm~?RT zUq(Q_uFPy0HrNWX7oD&q4M*w%u>56jZ+G|n-pbqeBe;Q-0rk(g^}dH6F8xletN|}~ z?TSeo*u2Gg+@uYV)k6+D?-aWcJxiQ|o9{%e^$@1mbm!Tvgz+sr1*_0@Xk6)1PNY49 zKvy4biWW5#7IZaA=u|eD+k}xPkqG#OzICrzBO|=@NgUe+0I6(4gq#vz8v8H|z>K() z6+rMpM7z-#h`JNJ_*g*$)b31%vG{-Ti-XQo0785fkA?DO0Aq2m#5}(uJUbW&D+-$4 z6yM~d*x3qb2o`t%dusq>-kn!%V!opfj`7d~+MIhp+Y)>fS6zH~DLfXm!`l!XpUWY) zXt>}{es$itG1K|QK|~F8@NoM0KvVbqXEuogu`S@>{opn_WQJ5ZuI@plF5~ssW-?*(us3!J1cMUA?Gs8QIfI++Nc5KY z;d7iDD&Rr;kB4<96Zn_)L6vQ8Yq(a{9oRnIgWjRfg=%o11(sz_VT4t2m+-`B%n;Sc%hvb&_7rbOLa4P2x-e7wd;Imu7;VEq!g>d3?R}q^C1rJ~U(h zoDcn;9n6PbL!)DFJ~URzSZE}81_S?FjfZxZ0?VssI`cTafzUyaDkt8K48GpBgI<43 zrQfonftrZMe{-7=ng%}@fA3WvWn|TGH#Q5Xh^&%7Etse~n~Q1|l7z=LiL3HwrXl!F z_aSeuOoyJ^YB5kjSA>|!Yw-XlTB}}(VZ%ubgo7sRZ=k#+L<98Mj~fWRGYIVMf5(6< zzuDhz+e|vI)<)cel%Xc8!i?`Kqm;6UdugzmW*Bep39YCn+`bHaUW@ZHQ{(V^ zb>`ANnI0cAZ2wF52Jzsi<>v&e}Ns4G``&O zK?Lwd$NWRJ0o9uPEB7sve|>lrD70&sHtfCO!`$B<-3vaXeebRgW^K!qy#B<&43Ci= zF{!eU_FJ}LSJ10z>oW+q%Cn&RwVZlAXY2dRXx6%|H=n4cSNf+rLdDQG-Fy>T2hL(T_b~bkrrR1j(1S%z zM(kp8@wLN{ag%yRXh6n6E2JU`T0-+9Z5+e10aY~yOA@~*vuc+DFt!nNK3sMxbWa9W z^6X&5ikBPjv{tbvcVTnJ;TPe6SiJ*&c#+oN;q3rHy%Wvt2raD}d;1~ODs%<86=~QB zy=vM`8~K@TZAdaeZD$spKt@HR8TNpKvUp}yAmxT>Hx(KdQZGd7VU7iomwD`x56#3O zxw`sb;PK~zfW`$m?8A#l#}jI7N9)7DZ6NQ1o2rHe{f(oiDi;TF-h86~7>RpGg^zp} z?y%EKwqVmQWEfQT%=VN!^(`WQFyG>7rJvDy9y*|XZ9SN7aXSi;MB}6Ft$74v3u*On zbT|)rE%KzJ8U9v!fLdC>d>7!KOz=R=j}er^Udth#0?<#ZF*r)5_2D{@pBc4BS=@59 zq!&;JwqTNe%GR+OOQ9TM%q^7~-SvoDhy=0<`Fifc&ni3#D5euX2uR}S{0cjUWF^u* z_v7w=Cr0{sqW}awJY@Kg9t)uEEMth@ltI_8waie@hx}8Chqr~M)py(;&hJG}xS_t@ z&JIY3wnQ5-fc{{y+euRE6cJa@jT z>ie2u{Q~pi7%aW?tp47)BlCSGZ!SAMG1~O5|1tKG@CupOQ=c-8tn;mB8q*CilRD=h z|M#i&(odw>S2hwFEIs8!g}>#Ijc~8?U)`aZX`;p*x8HWliDT3E&fGgTg3N?`0y>kH zbkaqN{>dAN8dosN=}vH?i?dEFym7W04LqXR6kFecYa}o#9@(F$)deoliziYKzYsSi zm>#^CWPtH_IxZMT_Ia}hHn}UX$s`K7k-`~b$vu0G0vXpKm0YO{Sm=)DkpSIo16t2wusi2_{3-Nzkz#P#)yE=1J^DG> z`DBl;u~h)q@|(EV_&W&C4$@WSs0x!-%?2uCXx|1eX|_Elt)iPTNdfJlF+^cO+C-z` z(NsVdv);&3_3u0)oGf-H65Yi~?=#*xF__5|juuAGEc6dgzi6x{(J=PGjrEChcg;WV z6>U9Z?ak?q)~Vt2)PW*+*}Q}!cpPv zb?30H@Hp^2yVqj3Mtqd}>vkeWURveG8`)l>l==3a`q=i8#g{8>$jM#~y$Et;>Vl4>L2`g7_1kltjfu>T@9z+M9Dghg*azkyvsbn;n zg4*xz8pIQgB%wd|2KNuSn&5VOJg=jXDRrv#+f$JG3_C)w5U#Q$&^Bns?5F%cGcX8r5}0)I5X4sWrk-prk>fRaEyfv1xcxQNWNl-HB+M4)@Xd zt%1PHibf3O;4tw7G{~v~4-`MW+Lb_l@Q&E2@d9qoM6I*+sW)%WKm)ot{jmL^xzKae ztK5P4B7L!UJ>$+hPOlfAGyj~!<2c#=rha|05<8)0e*SJu=)DK-&zlyY-CuuU0Nw(& zcm*rsNHH7-IGXRAg-4KTII8yI;GKHPEEq;QW_jmD08H!xU?Ks&Y4Un1ff~Mfp(ar1 z7hzQRjpP2q4s(ytKGxqiohT6)6FtE%5F#Snx-~Rr4&X~_OsEMs76YEJpb6j+Xyk`= zEC$iyz=_`A(7}+pbv2hR8S*xt`zCYhl`-db6rweu^WSf|`IhETXuddiWNKn)pextB z)Ut%%px3bVhMwyNjrV}YXF=l&>}vo^3Eqfpi5sZ}xGdc_9+MLj-R{LYplXshJyIlV zbeo}RhI_T(2v$>jRFr((P(L0o?7#}YF5TM3T>sdNvn|Cxc{$eI#=C84e9k{?Px9K& ztbILc^)qgdbcsmE1I}h+fm>hCVSYDeb$Y%X`rptSB@$zCCHv${)w->UqzOE=)e_X8 zYo|2V22xL#su$r2nKn+hK;VT8HV}&R*|7P^>og<0?9(z|A$s03H3J|Lvgc47#p@A- 
z;c;=|rbfPofU*kup10y2z$Qcy@clL!GzvpIA^f-4rXJI_VW#vb`is4I!*j!Y4xxqr z;5=UXb#)FCcbuzV1R8!${CRox8_b`{cXeQ@NY#vaIzkW&2&ptYXHB3X4hq06bI`na zS^dOfaOzeWz=a$d>W;kSt(NNh0%@3WR5|0pe)_2qa6)4A zjch$GTZa{C#>8{`kjY_oO9k)brl(cPtcfDULAbLm{^r@@rG_|on0 z!G%%G_&hf+eU4fpfy64ES7mwX2rvLmG_jU>M98ZGuj2?8woXmP(V!?WJ!-7xv+@*4 zm5Tx`=w|U}%ycD5)_Vn87jZdrs`MCFW;85eXm_o3sKg{G$1q&RB<67;Vvw@lwlCZL zi}(CWbBBtpUo-({2jOFN+Dead%9K#A`o)JHDqYdE`{<)GD@$PtH)t34)UEy_qk#Ap zlDOHPCiO}XP-lvhc4xsK!yPzHCyNXgOpx|&3Ckpz3AoNcY-5FL;!{`;^?K9DbeuU(H{c{k6`PTRQZjD~=DfS6tyV=Evgdovjv(E>+D z3oNpEnQSCjHh-qR#1HMGA;IorKkmWo#N0DF+%qLIYY4Q>67BH9<6Rbu2-BN zz2~0dn>BH&7kK1)XAxf#y!$V3B-e+(j4#_C`aj%!PY=}4{vIUoc6PL9TAHM<47wp$ zI-LWN;>w`Pf;S=#4tB?LBb$Sj!(lX{pdb}NW8S_x6kX@)@1f>#)%gm}ZWI|MohmdsGKflx8Yf5l zp|3byoEmuPD&prsNYCRJYs+I!%op>T} zeL)Zj@ipb`F1Q)m4J_iWbC~|%rZ3h#(l!}?zm@KUC;X(X)ro=MyNpwD>3VI5**D)EtG7rc{M9HxuqK-E)xfNU2}C-rmW z+T&HvRb*#inWSgfmY`Ds-<%WJgFx4vqC!x|^eQV}ckv!~cW~Va$;Sz9!<~wjiA`om4 zKt4)M2_28n3CmxDzkqlH&Tui3JWadDS??}ttq`988K!;(KYYOnW^Frg?R2qNMwFAg ze=CKz0&E1;2BMS9rcQd{wpP2C;MUA=9+4NW5*Q_F|)8>xK%NG3NF4bMhnb5m2N zbI$(~UOREMGxR-2YWJ1nHcw5CAL@fF@nwA(~&thrRD&dG2(d@&z}?sY^&pDSC3nOo32A+OjDvH%IeJifB!%x!kp zwpxVCKyzL4H!^!3C$k5+Uw%+q5D6#qbiHwe_GoQu8jK-aG937V>5=fJ%usi0&rnNu zLt{f?dOTYg$Y56S=}ay#jWCw&8EVh>v1RIfw7IcsEI)QW-+5L&T6%6iLz(vs+{PbB z-fjzJUL(AsjV+C^nY1U=N|R1$`;Ps~H>oA_AW5iK;P*9Tp8w0OWnR!fNY>}M^j0^* zzT5XvPZtWk7&$$Ipc_owRS&wY_+O?KlN6g8Fo#QH5lLtOTSA`#U^Jt!WOMl5xP9Q0 z$U#FxBi?fux2T`P-80!!KW${D=7+=nvx3cl2hkb933v_QB9E7ItwGA1I`N@rA(eTI-=l;$2aQJ02a&V~0iRw_7Yf7r#eUWCUG2yvit zB5i6CIUO>w&jE&NE+WAE`UEz%!mG+x;Tb%RizDXOR8p;b?u=fK3jqiWEM)(_L_xnB zxn1BTgaMAQ6*S*Nx7Xdc2eS_&>QsO?g^!2jzLCTi4*az*{?r|YJ-wh~dNg->WclpS z`^;bI;i;BAJ@s?pmhMBn&V|iiF074rIN@LM$laf!D%6Gf(CWkD>+4W&U|%xrDWItg zTi&s~aKt5x%Faj!>_yMd7xOaps9gFM4;r+AU_`ZPgq}S|DJP=~)AN-GRH0cUx@GE<060 z`|45cwk`cofc~|+ZWFFBp9#SJ|4Yz+0em|aYFByMFPI_PWNy<+oKbzuxZo@TjpTx8 zC;}>!dlXwHt=%@1+ha8!M!+&0DL|E-z`DRJn*sl6a_7T@FFj%+X#N_B_=5vR^j&ce zk{rgyWK0eVK5RMf_E6m&!K*xJpX5~1JLKQ6w55}up88%nC4p=p7p@@2bP4C&gjs+u zB;9o8vh56m=-85_n?qe4nP#>$!m+Rj0`W$ev>3=B$|gh_8vIe)G}b_&?0NXDc$~5JsRwye?Ov!`eOTiUciOgEr7>raiz85O-mJsZn?BU(+ z-rs6&t~?$V7QZ&SVyj`4mfOJBJIyvrZvU3PVHZgkaMPUbL5nxSd$$n+GjR0nj;s{1r7aJqaA!}@e<*?Z+$ zV{`kS(&h41(9Q7-{M~q(R%PAWmAj#@dqn^M8_ez6l6>6mwh(Ca?R^XTc)7XcTv^aF zoI#gCPGo6YDt^V@vGbj${nrzxPk;Tce-6Yjox##yl-53rD;|3ccdIz}Lw#F40e$H2 z-Bd#gV;Nv@L`?V5ToNvR_-EKyn@SH(3yO@Gb##3U2Y*efr+x{320G-q;{4;loRS)olTlAR z_L%T5oXhti_ru8ddMo7wCsx4wBfVs-oIswYo+T4eWZn=DWSSqpE6!L+?52I{Cx`ykzP_BNC%dN#gE{= ztt0P2?ERQ{Cb;*fK4s>CWB*scYrZdXBZJC(oI8vvL{_!bu;&hu3$~9KNBNN(nL)CK z%rHD=(0iJKme&K`k0)>^M!Ui-K!4K^sdL_=Sa@0ioykzTU8-;om09;t)58^jlwkhQ-ft_kKE%Q*b+Xl~4WM_5C0pXR|W zzGFF~S+X~H#eU7}=8|?(VJF#=XhMA)}X(_C+ z=Z4wo)_VkRoX0=^`ORB2zL2JGmNRV9XIPV_&9Q{NGc;64f4hH%mW=fo%EQ4kv5i=FCb^ld6=*C(p9HWNNZTHcQH zY=EcwO>QIk3!?W!~G@!HbWb=7(7#4+_#SP=tlCb0K5**jeV{@x`e2>VB+MPTpZ z&Q{HVy_Y{Hd)FRxZ|~*5w|ggL;$eZu4>KnXk+LjqU_tiYhHvuoncU*?l`E(3zyIm^ z%*x!_->tYSNQ?Nq>$(7-bJo}^F&t`AH@h8@yV4ZFU?YD@myO&EVl)=4HQR?C!e1M0 zar}L7j2_2%RXiw<1dKH5M`YZbnpN>AU{VHzFz{XIrkiVJ7@Sq%=dlXrS#T%FNRdUz z7xpGeQMH5~3dKei+mOvJU+M1YWtKTnX;U0tslxD5o{k8kMrb)iM@% z+&9Q8iI~!mmpdDH8br&-wsez-PC&zO84G#8{g-^= zUbKqwmo140mE(Udqjb*Zi>lA55^icheSma6(BFd|rlj`3?J<@#E3G*}=e$68n5(GU zTqC%Ev3^ME^A+oA5_rX0QadkowPLL$aD?AMw?=AI0kPjV0olUt9mO?mS*?09rpk>Vy?g3EDEHM8uI-civ|yt5pfU|oM%^1b5Is@ zKK5%|J!P;p4h@xK2I;4UhM{bHY7A^zH6_?0$8TK>M?QJJj2INAPdQ88eW=dUm>a13 z>Ty|M!tGq0=kb^xgURQ~qX|Hmv=z>5df{MWl8K0z1BWp9Q3jKq%2vtj?^eO1ZLDE0 z8*umcYIDP0%Lrd=J7L`$;yO##UTfmcGKvn$?pUbHNisjYs}oJ9ZGO1DK~lARMPxNc 
zz5H+pV)hw9inteTz#kz4qtTQ}4>uxwk+)N}5~)NnyOJxQ{NV4HRaccOsA zv-yz?3_;2SbI^0Q>?qJrZM`-CJ|^ZPy|ERb>L!{4l%v0iO*F?jqK_JqegaT*b~#hf z5o1TXV@0C5MVo`zn!9m8o3eu5G$E;$_3BqNYNg9qg`T2bdR--xKyNAZd!Wp3S4B6~ zE|?Vsy?Ya*`p}{gURYebVm5)8Ot6*6;nN{&FbxEsqRBBy69V_l6@iSZiK^%DfIKO# zUSbBLQ^svefj%osh(vk zQ9a_JBwZgub})4wQc6#jKaUeiAr1zQIO*3LHjO)! zHAwqesCXRR!*IHEHsEv_^M!P#7F>SN;{8@$wSyIh2LGj1i(kW1KitHN9(8^hh%g7m*@-Iq5wBdBUH) z#6Px=g)-_r=PMoT0;_}D`e`nL)k~K?5=ArU>?7&U?R^+}jV2IfL$LdtzfkEjO_MjC zK7He4lliafB8xCdEHxIc=F^6-+JBo zxAF6B_@0H}Zq;iS75vt?)FI!15{(Got|zz{E`yzZ zS2TnT$9tBK%q@4vt#}90G$AR0WK`9Xm?~bKyPx&27mPB9) za@_6hB98_SS0?iAHdSZ@mV`=pBHB9Y^(C7!~HocteB<(m_npZJ=24-}T#3p0l6#8WsSbPmxZ+Z6uaT zB2fBD{QELP>E^SV;t@#YpiM*J=ChL05!;cODKYUa>U(|V4(@eAE>+|hb+;W5{Adj- zF9#A(Gk1Lv9rEd#@GnCAoCHje#>F`*?7f%y%lj#mL-lTEZ6LctMo z^0onE6Jhth4|ktCF!qW9l7#)qNlfUPL+i@+SUfWvjtvbDt4W~82Yz*KZ7`J@T$}sV zjsJP}u9ePX7e998eTlZj&&vSb8*>F}d4(M3C-p5|n!WJc_R=q_^B`p%Yk0NrgLajm z_MS#8u_1*leBteqt4Es#(+DF7Tw!`T-0co$nbZw803ia=h6OB)RxI2itJbE&!&ycpJCJq()*$TR|)yrqG zI`aa0v5J2b26KI)q6v=+Lys46@R|b(EGB7zCPS!|vZTpD99-7p6DjD-sNN_1H~Wh{ z;s9L-zz7r_EtM@0rXQ6#EamctwxZ27#0Tz^_&^`(l|h?5sHsD=KbS{L+MMqMU1pj=m$Wv+**+D? zl!XXLR0e@)R3bl$2t?n*G9HeWtw$55$}mCA_3nvUOMQdAo@bw&gXp7^+A6&1l|*;J z9_Zucq*vxl|5yzg^yFJClnIT222ndZHarNc70IiC8Gu%ImGOA}3i$@?{EZzV_aO%B zJ{(F!G1gDCS#RjMD-oTn1p#&cTuWI;kF~0PueTPZj$sxnLdYhdSJS`4 z&7eanKYW2F4oeM%9y#03nco28HWG0blSL=+jkbhm+r8~`SK5Jk0Q)gC==o38mZI=6 zZo6#G<#9wUeU3~C4II7yrY?lYG1ndhBJ5f7q3t${lVO!g}$-r zd}A!$J#%hh?DG0VyrDU<9&2e#c1^AhjUDN0izRxe*QW>1A0N*)pYCX*Zsmkd!T0)p z(N|YPe@L0QyvRPJ?dgENO8QzVqK1CH3ppteZYavC&{t7z&+qOH`UKW{>Cc9^=ISm- zF$iK?V!FTuft3Npvtp4^JS9QgU&cHP;pFb*kh7>XK?55iy@fQrG^kYFv~FnAa?q>WCBTiCmdW?wFz z+y+7jeNYiU7Gn?rFkA?Nj^3s6BcPqqC0(_b3jVYmG|Qk4U|;T;1KAN-6kW=?E^c>Z z`#ZXx>%F_CTz0(PH|@qk$JOsrwyv(s&#||t!5_;3rH9!fx>+yvIFVR4xL|}i4xV&{ z0RbPBZC@=dFEuOEj8Hotq|sb`80hp6f%n-Mkfh_XT0NHtSh3uquyk!L=0`J;zCeicq`p_HP z#-q!{BmKSYnPkFdd`~YRsb#SXNmZS=D=lv)t#}NBLs6ZBh)_)3Wdye70wgja6^4=l zPlKsTs9TnCHS8TwD*{^RR*%L_!^fSm!r0j2;?kA^*2FYDX{^>8qMcpMu}t#%R5}?> zfr~fA^GBEG+Ukqjr67@K=kH9k4mLLr40I=x9l1;_)s^o`Z6|umh4sjV6I0@iICXVH zy&v}Jb)h%2x&xor%}gB{Yfm?%Y_-nJ1hUo+w>k=41rRzzFdVt@Fp4PkD04x)BsBbt zyyNeS&5pyfhW9HyL%0+$-@wom_0k&(;z(5+B&Ol+H zFg7|eJT#c@Pxt3L@%uL4er-msd=w2S;M78HEF3R~lQFLuC>qu5 z2JKd&&|J3yW|`!E_v9YPZY+!JfX_jvoq*sAnX>ZD3|zXyY-G5e>VVdjZESCWqTbaI zw|xr25LGj3Dkahi#f!8HLMFk1~>9!KjWodw9|W)3q& zD_jS3f-tNE7Gu2y+?r4MXr3Z1W*yP)eeZ*f(d_=+;(FQRTP;m%uK+9Tg?fKaI|J?` zt!gJz>30l8k{+K>KPonB4`#X4qsQZItx#}VARmKNg;v`%BSrbp4gYG9r_ak*>Om=| zN)n|h8U$uY;6ytlPt^l}O+dMkxPE}Z9VQ#Hvzq1<{P5GiuM5EP=^J0kP<8;OPvf;f zLR9G%BnPrR_W(C$Q33=Wu0!AoC5d+}S5r7_@y&*MG_Ev_18-W?1s65KjLYdtbn8I55mQ>^Vdg%%55Z{NiOIbRg-siRtpjT{9H$w=R zqMKrhn)P_B(&NZj7TTjj&CPdphGUrobtv{>lT)-4&?N~JhZyiVQJSvT0)c=UL?cpd z*mKs07SyK%5Z)S)r#I%%6t;3V@Bv2RQN+M83N*@=u@XDQW$`C;Y)7A9{uI#Ee8`sH z9>gZygWsMfWk-jzTBK$*C35&4{`JIdwy3UuotCYZ7De8^?#b6vvMy>;{K2J1w7h); zXnQOCw$G8C!At6K7uOH6JTf^kJk+z)yHwu&)=oaJ>VC^GDEJ%nAWWGzeQ??RH{Q{p z?5LyWgw@EG;ORgnZ>`S50SH*@O&%gFX(6n6_(tV&{2nRwai-MQ*a_h1Aw&LVfINZl z(#1-6pzNd*PmYJ6;U{Af=B@gE^`Kf*IO=E!D*)Gn1rRhR(092zluR}!1s<8+jk|;gzi*Nn z2jGShNer5RZE4^VygwT2c0f+m{@$S!T4tw$)DYOQc>`or%@^1p={4B8d(AezEmEU$ z9Ht1+dbkkdBt;0uo9bfpFPB%V)NXS9f3bQ+y|2{Bf>rfgjlY_ovR&izs=MXC}%@|OIi13bn4o|=h2g!`CU|uf+%@e2a zXNqQYaxHtBtumT{iBEIv(0(vKAcLjy_vf3&`Rk+(6}|zgEo|Kd4%QQVzqF3iEvV+PDyDac;&3M%10KL(yX9j4 zCU^HrbPU+T%gU~@ni_vVrLUG#9UGy4tv-#hW@Add+Rbxnne z1*$gC{|sshbWYe0NnDth^MZm6Q%O2p^)#M?zX4C;@F73^_UJ|ZDE^i{9B7`b($H_f z8a=Pee+eP@w&O8aG^V%%Gkc-DqOWNL|DYf8wot4iXiq!f9EN?o+gxNtu}5w4RCiV7 
z^N-Xb*RrCHr=Hk?a1GtEo?}G)6K6G0J3#IsCKXlOKJ|myPK0NBRg5ivQT^xLkFM-U zNbxDSEzuxORaF=6(5Hl~v+dU5d6uog9nAuu=D5{wkJMa?x53;&QLqH%V5|V6xQ{^A z$}xYTK|{M`)TPrT%a?)iUd|8S-FOaA65dz#Z#sdd`w1dx6+~z@c3(k&R+Zi7w%h~T zkl>z#g)92`S2Gk9ufn`{l{1*~s*z(AtS0Q)M&AiFp!Tn*-r#n0bxtGx)8E%w>?(G& zw>FpcQzpNqmqL4k!K)#6Jw^k|wg#gatF-`+*5);f;4;XVA(yG~9xT)B#(z~gyIBL? z$I%3eOa>!s(LA|&v-N;jKgM47;O^_~mfuKqvDY2HU6eR!m0XAM{AgD$m(S;NU2_Xl zQ;UmJQw#UjVmy!bWV1cp*=%=ldb&72J^j>!TksyK2Zep|J;8|y^*OhFVtj05Xpo&P z$}obQXMqW%!{nF-q$cZWD9_XnN-5FGHd@EJFqQni&{0<%2E|x{aHC%`N;~5+WgNIoQ2*- zZ6~lVRXjATe!%TZCfJ*bvzl6(6T=v%&VmlFO2N)k90pI3IwM^^tksxQ8Fw*8!$A3? zc&MDOn9hP6=!Mb;@Rh0snxpvOg`?OiwNrF@nkqFoe`rI*!_jyF90s%V3W1t6fT#(} zqQveu;U2{6?e!xsjWug^I7tLqXslGbQj32`ZQ4_%Etm!=p7n<6pCm>S59OVZ+r!(Y z#=;h$jj5{JCjXde2LH$i|45-RskS?COQ=w*#W`5^K+aLzc3BnAAk$%BY}Q{|re)wt zsjQUmBA~V$_19s4@|aiPzMak@HVC=Q4FHkQU)CU25=O5^EW>s}=mqALL3)4F+0WJvd{b6^VFiw0Ts4FFRS(;~QqEO$@!LifD}lao>(#vT9N^ZJdbcaK&_I7T zd=8ksHl?O)h11~ zF?)HlyT^ChD<8FPRXD0+6+?cOrlZEsdY5pSfucKbWW^u(b2hiUyfEHH*XoktHLEu* z4aD0|6zmwF8>UsFeR%QuV!2Os>gspH&y-ktM!nU|%iP{fTzadR&P&X06?=IOqFkhk{y1#o)@*YByk<($1@+_HK?-|{c~$oyO0GXEnN z?t1NYuf0o8oVZYO)W=E}RjKq!_3!vE?iksK&W{QGwx~C=CV(B&{JXUoPcm|)DTO{$ z@O5xN3-r2B1TBmq85t~x(oO~+=-yu7Jyoy3>4*8W%w7tF&KGJwPBOQ`^`CbVppejlmPzg~J+y-H{`roJk)+N@scb_gFvPE(Kv z+t`5cNHm!v3zn;--lNGB?I*pVla0RU)i+uBJRc?QvqDzvc(-jR}2cr_DdjOVmHR2q~;i=Yb_0YDSI5I5Po0hKn*-> z&*3j|3|~^CrEjShmA+f(Y1)v40URw-vS9()acpDgh}=J{pOnq0IfJ=4$9 z?ra+O)1>i0L8%4liW+Qgu@VnxxY7EWs=cl@9$?JuQtjuxBcuPNCLZX*WQ7>0nKTx_ zhZS#BSm_%9oCwSa^f5!^67Yia=CYyR-R4Z@{Jm1m2Wqg~0P}OVMT_961iqTVh9I;M z6p;O$3J0#4Q}MCm|MU*!PXie8kQu(V`BooKRA^1jJ3ZTF>A9i!Ne*ROD z9UP_{0UbI$r2+Mrng_MXdi4caKjAVZLL$a!yUqe{92$WLEF`HBt3sWG!szIldK4f8 z#@CS_efjEV&|~+Kz-S6JC1SR9dltqY=Zjb{ESE<|{66#d0=V)u(w(imnEfAZ4tKER zAGGoU=Y;yG@QZlpAGetIoJMpq=+#g;$D@Rb-@s_(Zx|izq1qtlfFF1O-2y}(g(*oI z!zO?#;c-m33M1DKKYAxeK8koZejAv@vo%bNJJ=C4U(6J9tf^H$@VdJ%e;)smnSA6U z*x~DNE+2#MxFz)GNZ`hF=>*frTU3j!)Zv6o2OFmDrbjY1=|Qvn>izncP?zM_XdjO+ zMmhq|hj9$iu0z42OfrD}L~UWB-qYg7V`Iu~p}PN1dtU+`*Hzzp&+Hn_XdjJsjrM)9 zc3ZM_CEJo6+wq>oPDo^1c7zuh+0IsyvV?>+giv(_W_abotbmaJ@=gd`R~ih#^dV~p!?wG-`vc& zo?ye!kSLqp=jr!!Zm4c4%N6~eeV$(*Jho+hw5GTI*g@{!zhX^P{g1;b@UA4wd>51k z5wUK8hx==|_IfJKavlc)K?QDb!K&~Rj)$5R%8x3hXBSI;rKGth-8)G+t|wcOT8e03sLZL4BM8l zC4CI!uk{4LyoW&Cn7E-DGr(j}&sX{?FfIa^${va%I@Za3uEC%xhKNoy0J~NY?(@v# zuxM~v*THXQ9a_uoS7x5(b1*|2JNTvR5i7oXc}fB?87DzE`(Z_R;+|PqPUNZlM0}n3 zj(2Q6c1+fM4cmpSJumh7TGpKzPHq;TsjZ%40a#!FU+*r>{JOK!P zR_0vdbL`Z;DD@gcAxxs;6Vu@d`>pm_;-KJ2eVc#860FT*?&#)289|3QA#wc@Stxf5V%4J0{`L6@Xk zWO`)0B?*x&i<>=ZO>uznVv=W+B|fAYRCXXCQ(0l{-3SgceG`*r0po(BuNxFVyI+qs zO%7CacXgN79-i3!y#9jRczs#&YvR?(3URii($ibr*BJBVHUW65#=DIe@1{p+r`uWye7oaU!Rp@PUw4Z@2Gi9wQ zsbjUkG-YAzWDm^@n-u=>J>7-Z-T3mCw~sXy7PMcr-d?Gl$2XQ2*0hx+`WuQXCr<61 zI9l6(!%#)X`r6%-lVvTv<<))7_OjIuU)5gO-%+0vtY0@)*R^-7L8Wm}vO)*e&V!mW z*Hd2uuM{h=0pF>*x`g8ftdKt!5aNj;iVS)X>@L8LslOp2NJWhe&rR8zI*?=w_uWoo zD8>;rX;h?-uR8f9G}(LutKVWem`2R*#?_`qo|D3k;&iC#xw>m7veiD7nzhS$o{dRG zp|`F-)(}I0(jba}zVH{K|K!On$xn#`gJMK%Pd>Kk*=O6uH!<(+fMpN((FWtM3Cqad zTqvVWLPmpV(6HQJAcU_DFq{AkgDRoe47NnUFkYM3QrSJ+Fl?F4vUQcvNjkPw6OgFw-yej$0_&`b5 z^-~=~`(`?Z4^&j`8Hlt@kFMWXO9N=9NMq$|#<4I-Dz5HMUhR*uXGR*gJ8B6YNLBM_crY4TBnu_3J&6BVfH zI?e#-!lfYLQYeHMb>N2)j_*F|#mwol2{~L88kAn^1DnvKG>#-L4@4U`5A|=X%MB$q z4Gm5_*P-rcLGSh3@$Lm>o1VXa`~KppYkyQTK3LZ@w5g(ee5k2@ za9ljvd+@fY!iK7%p6P?l%?GD@`fq&co*UkBdSg6yQr0ZQG#RhhEOmQ#EuvW@NEHAA zL@45AaHL2Df~s_^t6Z6C!87K8$YQqxe~S{14Jkij2>h2OPO4gE{YtZQ4tk7z;+f>b zo)2&8y*P>Cu&_Ni2W@gs8;oh=ezT&fF;*gs;lak~rfKAu!CA`9fgPb(NBwkf>Lw2d zZLI+hVqhm82WA??E4Xr-)9DEQ%FG;KbpQgv;Z|_q>YAG59NkDA)Se0(p1GGyw*cNq 
zZ*#Mkfe6@A8C=YrRaqhxS{I$|qfG^o`mx^LvHD0s(`fs3hYQ-a4i0W@D>!`J)XZ2# zeoaqeU0;0YTGPCCDBia&(NmLOF*YOK8*lC@D<5dDtZW}BFY9TJZ<^j&Inq;I-7`|T zb2|AzUwCY>De zyv`x94~?RY-;k$eQ(f3~TGo}~w2U^To|cpQo`Xgaq7u#;qj!0%a!6f6!F>7dk^pffwpi(CvZ z(GJgqYQ#&K;#&zx^!^_lXm06)lx4Y&iI!bEckbGC&B3b|?zm%NVSZjbe)#0c!-r2k z|AtL>-FNTZci(&8UDz36NPZOaT?Ic1Va#GRpId$ub|R1XxW$i-V6y*=c~+4hEsq!E zv2uf!09uhd!3**S?_vNRYb;r-i4+wQpAajd>>yNVJ!wtJmQb0c>LC&H*d4GsHb07_ ztwFM%O+#Gh22^4|AqpN;@kH;&Pl+*Tm#ZpDwr=U&Ta(w)*-^A%cW+GG*t5Ou?oHi) zU(!`I)LEQs29TMxymJG_$H)_?Me#kn&xr9Z((T^vu8yXLnkwX~OW{EY1F#waaryiZ zEqw{n60$Ii@5H1@gz?IJJ-n*U%uytD>62M{Q*TS0i+9!oVGX!uOj3x&07JlwI%LJp z>qHKL|6-mliqg%iSZ*4`CL<`se+5wPN&sidfp>|48uP!wYBehBwZJHT+!=u?rD+&op zX{E?bD8Uq180vO@viN{h3z?KlDa+XGz*4zJf33}FHSUaND@Zu%Hotj4lA6qHWtm4k z$U7Pk%ys59MvXTtk#}?`JqIXE%u2!M=B@GonphTc1crfxN=)9G-Z3T7C=>{iQ$rv8 z2RX*>%|R54`6YO4tT*2{Xx{IGX{(~@d`6BG)QQ_`KB8WPi+~T8=C9tD39mW<<}{wm)RI- zAKFFun?a$NBj9@v3}ck5chw_>t3L+ib1uCQz{gU=#8OtmrJA`V;lzne3n303yQX84 ziUKn86_3(hYtYat1`UNE^{9~%4VN}BOf=HRVRzPnk@3YF48dC@2tFHCZx}X}musN6qtyHht_po&A-eP-TCobq$ua*A*AH zY-nlPI#}D#eY&|Sl-vA)>B;tO&l{M#t+1-OB(JEnpsS&%YsdO{YvBhD6hv|BdD@jX z?czVg8oJ8MyBcEl^)CZO6^&(8jj>R;tUb}(;D0a>DnYbKNoZ`c+iWY|w!gf!Hl8EG zwdJw;X!$jREnE6){Ql;$ve7!;TCnarzY17~?bCPuH(&CNShi34GkT?L5*Gbt0bGj1 zUQ0oa^x(8uQ9BO75t>A)F_Y#5+7eJV5HZ*+#cnIc?N%`9Bcr`i26(F7ttg2wwuNJ{ z6@L@dun{UOQ5N=2f``ZPa%0&X-Ge%D1b-uR($k)~pLQ)@#goj{KEu|05BIkew=@JB ziu;Qn`|Qo{>HbOA_HXok<4-Ro58i$|6#~@t9l|`H1pgR;_M#nmK_PFnw=zbpxDYzN zh=^znZbuz8C{*#$O@e3`D`Vkwd7v$AA3U`nXY{GoDOzgX58lI;e60E~~YDs_S+)jUQTH@$Aq|Q*AOcb^XY6Q`7WF zeKb6lm)kaV69seb>oeBA9)<2K3O|zaC~UoIR~_9MW{A1bYC(r?W&DNv0KI=9Av9tr z%S4ACk@gC&;c5lYx0Gkfw#FSR3|hr`kThNR`X*Zo3tK1qZkc?)dBbRB<>(FO#>&2_ z_V%g1%H+Kp#WM{PH?CiQ<3s~S=A6HXIp-Q@%sh9FFFo5vC|Tf%j`e34kEKZf@ZlZE zL&y`4tzfwdJEeis0URFQkD5Q#m=|RuZK7qQPTEK6#Cwy!79GjgLoxoO2tD;MrnvFV zc%U`E&tQHz#?2ZcqD%+fObSWKZlRqFCXLme9Pvu!5ff;YXg`~br0faMvga?`6w+T?ptW&X6 z4KnR4HlT~4hLQN`NAf#*`{Uz>Mk*>s4vm-gcD3eL@7ZX9`#`{d-^QPZA|XG(dWZn; z3r2!txrH{!>wQT|xcLn^QoxT3c=URd11RXcfSChL1KL@6or+95!m$ZoOUoq_wYgg2 zT`Sp^FYS0wVIR?+yNAY9Y7rz+;R4D<-bz>lwUC{(6V(bIf!eRD7@X)HLOQ4?V5T3M z)&4^9>Ew4OCq-hCK=b62NqR>J<1GID2-eAKm>GPiqWCpjO1CJi30$SJIui0>{Ya2= zb`pl82*&M0%wh}1%{LA|7R?PIa9leP(j1Ijjx=54+~F($Hz{nBCp8fANZ7w4fCxVm z;Z*2$Y_5*Pu*_&Cqo2rRwe%HL$JdQb?tNZCOa8Fm+qh1gO}<%rx$s?yOa8ZniJzF^ z+%OL-NVDzb63+qhfIEzk#i60i#MZ*$!-j+g_s_(pj>899;-d==+GSfPmccdPKoB4l z$|Mx1Jb|RMJlJ;b#uW{3u3}&d0#M7jB`9HlRUdwN?H9%;C!czrXiolfa_n>BneTt! 
zS{LG2zcKxoLSK5XSSv~H5MI&J@x$E{UgffAJ(o!Ci~1Eqd6d~?n5eV=?Zlar8-Xp+ zOg+bQ1RvltDsy5-4C&jzM`~8(;$TMd+h~O4~%plyzikM$4At#77jT zmzYtaeX(!Fd?o>p1{$1gE*Oth_@u)Z^JWjT#oT$6Ve{8AV6GAO?{~4!7Iho z8q&kx4bt__ukwYp|05g91pue4{6gE%6m$&-)?P70VP#q}4bUqY++6vvHbMhDBm0&Y z=CgQQ1 z5OXU4Z%Q+arq|RK-^^3VBK{-3b+_4CSkwYT&Hkdss-iCy zRW-gAgruT#G?5tX1hVEn+~fI>+=n*f;A72=bpFtor2=U{<33S@g$DnF=86OqD3^tl z9|5)aRk?)nY!U=5mT1v71cEVttjR9K0|{^OMvQJ3mB|gAV@(BFx8l%?E5yjQJ0o2a z`+BWy!Ba^;XoHC$cX%85=V(;Y9zAg0DUSAW?ko z*8x-OD4X?mx3$n#t!a2$2Sl>nR*>WMm(;DRPpoe*_l3g#4LJqty4#up{-T=BMEyu>X>Q(za1;Tv z#u%pdb--ZIcv9kgl(keMhsOCaU~gNTQMB8|#Yi0tI7!)nCkn4x$qu}67}-W(ceL@= zammqKIu_Z@(;3+1#xz$nnhh;>+VJ-2_lY-%$CBH{V;eVO(=ZX{J@Mk*3yrTc27;C6 zAm0>;~=}y3DBOzCz{mN18~p^ez)10Y8toPh~^5$ zvfkVA?S`)j6Q~A6=h+iea2?$}bs~Idk2sM0)3N^kF<$oo5pg{bn=jWRV0_9XH=pk7 zTmlse@DMx?EH>K$1S)B*-{k(P29D5EvI|hVg z>WM^_8T;_XWz>fiz3zn`YR9|Y1w!tDB5J?*pngZCi~5nnGZN%I$lPja`^s`dV5-U3 zp?n+pRUBTM&*8i3-|@Z2=(Jin!7-&N{lVF?zw^%Z{2AmCqB6?0Fnd3f1_%U&V|nHK zYd$gCNahO0aIc4262|mD`QX6)_YXX{Yv#pQy?92h&Bu~Eus-L+YxpPEs1>`YWZhp% zoB&xjCxr444j)*?gxqk(6$s#>M28bEiI9CX8r{E3FZK#H#7QA?0JHB#4jr;F4ihLG zu!NKVh%d-pkb!0|><0Vjld`f0n5!Au0-ye3)*wX9c*r@epa0~+SHJq;CqIAijc*j< zSD*XbuacL(_B91Zc$;$Ij*#(wc~(Nm4viJkl%o_21g0;;X`#4L;{G-LOq9tVmpB!f zdz)GN>4O3PWSQ8Z{F|Gf-2c!+*#5^KeN>pqUkb4MiWYnq@04Sl&BFgTK@9_ph%Iz^ zk4m->dj&TgE1=)(bhYEY;}2U&6{VDpC>pUfuZj-#4=VFNV+YOTSK%i7xVWlIykb*V z^0rNsZKa-G#__QV$EWeKNUDxBhgPP3;BDyBJwkemQe%%9z}CbJsSrb%56aSrAvceV z0H%MDEV1FrlG>f#2kA5Y9S&uU;jn08U7%67psVpr?$}hG*V@H&|3?Q9I zYJYcE;((>PvaL_nrk3UcIRgX{+lXMj^HfRynFohed;?yj1+u%UG#C)US=&b@!Ej-N z&16H>3zL=B8HV&I!VYF+S!ye#M(S8P&-xl>V_u1QI7mVjI}|QF@lGp8#q5Qmgs52&fCtc*q~>nrP#920dV%FC3GH*==Se1@bLT@}#Xf(L;W0@W&! zx&kISpl679U`?1EfQkCUArAsmoy5GoR;M7k1&MgGbawZX$?a16@n?Skbr3UmcgV*2 zjQub{PS$cOaA#Tov@manE8=xJZ3GcQexeXgNFV^AGMPuT(UALl(^vOgVuLf+l`V#4kdEB))>v6lV+l}Rv5L- zwW?TPqPi-zGDMUz1wCW7)CiPJAao&w&C@1pAmorSVxhHQ2qj{G7+iHBP`Lqo73?J7 zlphBjuoSNzS5L4U6WPIxRae`|2!%A!J-K z^~+4t8I&*W)Rx$nW!O+S1O50;XKX$Pb9G>w z-aheCX|$K-&6Z{vZ0jbcN3&Luc#{jrGnSA@mb(Dm#-S>=FmdP;W?5yW8-v;zhBe(C zun-GmVmK-iur)RV%WbKU;rB>1nq#3+J1@Ynjg+8OCK_F440m@j(`IUS{!DBN>B3gL zJ9BcSq0z`uCi9|6XDV%=ROGnQ;WTbs`@F8yj3gAnrYf@Kt#%>EZHtl$f8GT=i!us7 z<$@cVSAP-Ko^sktAn7t0y_V8QTV!q`;;~j+jG?;)jxi)eEoo6c3$kn?t ziZUHyVW0(PZIQ+O++7G6L#bW(GsP=c&rE`rRBRI-#?rc{2w2MuB7urZtjnFl+MaIj zcri?;#m?@&Rum>$ z(V3ci>M2#+6BGKQ6kW>kC;tRT&rJLivY$$(%jTc5cTeY_(t6q)RJH+J9QQ``Ow5Yf zTkFZ0`Y(wprmdMf%3XV;6r|yvPp0uSEn4HO%quk?hu+Al1~mwq8yfD<^Sa4QnXG2e zFLKLTsyB3c`bBB$$je+XHCWO)-B;gR6J5WhZ@RN2d7BGG;EZ=6M{o!>7mraXD$P8? 
zRJ^n>KMXcO>R`z)tp}p;#}JeofdgO^JLQ8Lg-@W6Jx~N-6hxgU0&Yzxd%>Ngj?gZ& zOyn}O`~XxVqg9n2iDAPE)nx6>$;4;j<5C1Y$1xCFn4wOhf7w zIH5Aj+n2tTi?j18Z%a5^{>+_|_l8dCc}d4hcNX63`Yyl?6D!S}-(QjXvnJzf^kAw_ zRF-qiv6eu}+SWK6fm*IQ3N%>**DfzyG4YBJE3L$Nz?PO>RVL2kz29#Y45D1qm$kb> z?E?D-wg3q>vFvv4ywS8yK&&!hp)#(}kxh1!-u*I>pnNRHaODy7F6recN}y!=(NeCC zQtkAqCH)=kH4GT7;D-Q;i~ubuissrn2%U9rj6yvsAbAr75dP~}W0wViX$i73XVSNj z@8R{rX8`3Yc^?R%_78a?F6HpjVeX9a_4TDbOEAvER+%*UpV!csFV4Ll#RG!MVO zGyq9m#8M#SI)^KQA%TQ!DKLZ$gi*oU#yv#l{guE_)-f;=6;h=f(4y`V zjG>=Wbk7-LIT#Y2^moB(uoAU^4QwTYxnXFHvmy+weLgk}71LbQ(6TfFdY1yB;UyvH zX$e7*>`2JQ@grSf`guoA7`!(Am-4SXB`#)*ey4ajafz8K`wz7-m~9o&G3E5X)j!7dh*L z6&TLa1?vV%AeTk%1N|TrDd0uLD8x5;np>KihlYmJ{6JJf63c%dx%&~^69@#8;i4RG zxG3Vw3l8q+EzP(piJgtP-gtRlpg8A~Ii>O9K)AfJA{^-2dRZSYU43LUva3|rh>ZG9W?P+9awdNU|@oCkQS&7`36|v zU@?SS?6*4%nk48_b5`)t(07X9QmFWj^6o<-f zAV$Dk-AHL=LukBoxic`=@a!}kq$9#oA|zg|aL*ev#7TH0%DK2ZbGQT+gFb3slzd~p z@dXpU)$ZEpfD7=0Izvw%qf?Z%6}iC7epqaM{u~^<>M+?=?22tk3z2lNOeJ<7 zLWhC`Sk-KF&<(AOtOpHn(Ss;16}z1}n!~M*K%>LobaZWoAtWM_l8$^RpY8a-ng*;1 zU{%1h08D|n071CRIG0QVVfvpC)c9TgOw2XULGN`IQ&`He|B!9wvTb4imI4NgtzoA$ zT6EEd72^aFXe?;}NWp~>q@7jj2baYODFZBp5#*RIcN?5R_LT^U!b<>}zl{Bt!wH&6 z08WsS%SuMlh6XoISj&8f6V^Br8%`|gMYwQ+#V?4V6kt%C!ZR8GY?|;QZ~^%SYQbo7 zpd5&mo}ZYY8%%($;de-MHl6^S^PsDP#Sh4HX!8_q9|ld`Wk?{Q+x;r9q`F&Fq`U|; z-Zo~l-{EP*cGQXw!Cqs~KR{_D7wNhAE`x>#7Dr3Nt#Om3;a(1}^}0JdCGYhx8pUxu zXsm5He9Y2SyB?jfcUGiCTZU;#+0qZ$d50Abr1{*n&1UWMupw8~eAWu;6vS4XK|_5y z1q#x2)E~3~8*zP?Dj*L&1CaZfS!6>)Q9*7FDTCDUQA0frt+X#s`a&|;r37@s5coqk zH)unBO;RKE)V7?*#AHAPDkXaMVMvM~v{Ypo&j)VF+>+kieO~POAo&aw=$sjy0;%e= zXCfs|sGDILlmW_%)y*j(%WjSHwaM@6_~jIpbB~H4R=SFxN|~|O(`;6e=%ZqBb*z|t zW3V^W3luDIg{~=j6|r=K2n8F3KZN>*76EIku)WSA5Rqb1_?erjk1EFXZOaCDt5QlA|7!(t-VV$b8yh{X zW({-F4)%Ap!&(9^w`5~%qhtG&S#L-aw)Kjf&^q|{#1WWX1#de>!2`pFApH;#4S=L{1J2U6n>CR8Wcx*D!xd07k`j8)i2zJzmJj34~x~b&ZU1tg@&U<**@k&*Gt6 zw8WZV<^h9nLpLg%8FHcy$R1cca2kDC{oopAul%gd=VZPbYST=mD}dh43eIoh3@GtY5E+aPD&4Wx+ihtT!9W`f^9Y(p;2X+a&~J?q z&cKcI<)SEuf22>Og>nl@!4NaMlVuRJVR<~mSYW&nYDxgsqf)|#|2r_Qdwlvmnpb5Q z0qFw_JXoE9OoOQu#LkpBv~4VO`vtCKrz~A_bot%U3g?S|v9f&$2j(SGm5X|4d)N5>{@zXwzXqlIL3oVLdwARCw8?b*GgR-ODbn z;_H)=_b3wYGOJ{?NKw$Gv|7Y_Fhi`Za>C29_6u}Y0qJT${}tA^kq*F?sb49oy0Z5| z2Y8jBj*Qk&+5EgT?MhKtX{b;o6piV1jJ4HId6ecDOqV0j^5S~D6&N# ziC|cSd_nkA(p^F^qP!^YN~$U@dn)D?n1sfk7S(rW7qQ5D%t|t15%JGHrIU?Vdkj9E zb*na*70hts%lEm&P^F*N=cf_eBLz{3#bBFaz#&u2_qh;{vg;&ceMHko0t1YE1!*9fL8)CZ#|WK;t(9u+52DO%0a zva)ETtg;NOzN639K0Q?|ODkC)$z#)Q0T)%^XqYXS0g$T=IUnB;38%GoqQ;xhn>#hU#W9=bJ{Ig>vU==a zJL$>jsDJUaT09SIYKExrQ2!bF}lF6_2lC0 zUs8~(9hoaDR#2^0jERP*Nxfy#^P&ljTh;lY?ldk2%+eCb$|dm<)DCoHWPN&Sut`)y z5$Jh-r1R3E^Mg6+R?bn9=gmncfFf&(BDk7nJVlx&cezE=<$7sKyo%@GBq>fKzKKdN zDKpI$_J1T=S(;Z~U09tPZ7l5?X>6DbSJXBZ$LsyxUVoskt79@Do(4FAw_u*PKan)B zRJ@#iswJ>1AfKeADby6Omvab0LkKy@3pZ6#LLf)Rj@Yo}gj31lH3~!bWbFrcOxBGj zLZBRqaaW=(wIvwBBTAnVp^YXdHW>3P& z(dXE`yfCmoNUt}QYZTxQ@}a%n>X?!NI|w3D#@&U4?k`pIx(-a`5AAsY z&YAl#3aWUX^P-LonjKm9TCZy}fexHAq1&~dHjHC3gmG{mP%o&4a=nPorC6T|+MGK< zTUtQV;6qOKCmJ$tlQVXMg&4N-DO&Qhl$E058naZ&(@`I6HG~ZX8JeCqh9M*%AtmDj zq)V~{5ONnF>iEe{Nk*=?A-ye{u?+<}4Ik<_cUes(tYBAjxZkYv z4qAe1O7=n0MEc+*C6C;MXsjB^BR5qwH8wDsJ(W+^nfN!>S0f1~g|k$4nc=b?7~v^< zxY%W;-w&7OBL0`r?Nr{9H5kn@h{HF#hD#lSt~|ug)2kdp^3QP7Bs=Y??K!Z_nFP*>Uf5FG#>bE>*&{f}VI#VFFgGOURjLX* zZbdR)wvI1ELo+}^spUwGIGlnzVu{O{fVs9u>#av{0a8-AcKYjg?YXP(+_`>dTTx2> z{P~MTshLY9?f^YvE-x=?uXQYg(``KM#^TT?0mFlT!XJvK*ld@LG%ZXH6pf`LEg&IE zFPWm{ut6J7yOA<*(UQG2yS2(^nwR%9t~$*bp!CBIqCNp?|7E@0sLZk>WvRW4uk*wu z?Tmw;e~27zYcr$67Dd=0>e=>^mOPcVnjsEMSE!a1OciU@;Is#4sw>k%>Y$V&W9Y!h zAf{Q-KIOEo+u>ADRI>+WC-4bU?MqU$DBmd(>O&}Hp%7Uvu^t5K50*_mD?O$^W#^r> 
zuNm^s{y(`+^p#$AouJKqPSz=a&FdDoGup7Dtq!pk8KMoPOlyTcK=%OON+|7O(U7?10@V$&3={&(A0Jj zioom(WxkDpSI}R}znArFnzt_NsT%iMmZ|0T{}W0Q_N?Hjs3<}Ekl5G7hs9^%6s4c; zFvi6ZvosnR+tAlb%;IRM9V*P+Q4|eqKWpt=k``hLlqw;h5sE{fpMD-r3}EUQ%$}?$ za5uDW{0(+{Hg|P|w;Q)TC3;k&{2>F`Ut9|8m37xRf3;6FFY|eNzq9GKmji&Wr0c!^ zb9qo$q!&-#V}pZzT?`hWK*- zP+!Bbs1nJC05Q;=^F9>H3u$k~@$`Yx_-;E@P?llfc`&uI5x`>xbAXiqY0}59LQjXY zVuU;`m)V%&<#y|ZSy|=N%Wqpo7CL#t-ZJ}|zIFD#QD09U0rh8W4uKkbts7_rO}D+J zv+)M9{*2Si#TR6v_fmBsJ)@Udm-kC=>CWYs04~(al6(7oX&-1YM#UMkR2U;e-JSG2 zax4TIdqE$BomHOI+Cl(yp}adadSQsXjlY^*UECPS3W&Gt1~vxkv+V}@G44K@u$V+c z)ZY%d)%z>%t}ULI^W@8Yrp9EcwuNOmm~|Np55K=WB%~Q8l3%dF-@c|F1E>=nUO7k{ z2JS}sZYcbf7g1I$oDv)h?Yq&QgKAMq62W0nvF}R*N0WPZEr>`R1p0j&Hdy{l{urbH zvvTsbk$O&Wuz{NS)a7qiFb8_0wUMT+4H97@?N6e+6Q|6MJsD>lpr~ zmWK!UULYakvd95A&cYAye+wHinYXUDtD~X5wwlrG5+9Ow^?<$gHKN)oQrjA$ zj?4tONa`3^X2J(TB{jU@c|0DJ+@OR?-PJI<`CZ)rPRJa^lrgF=*i>|dQo`;JJuJGT zh~t)wJ>lc;7tA9{W`@>hD8>PgFf%}URn{=T5R%YPw`LiBn+BR&yrrxz*HnUt>t?2H zYtq#i$y%}&BSo!JhK|li7lt<6W3o2PbCqflb?#|Md)=9^)?9Dse-F^I>WI18(Y=ei23e1j60K!u=he{%ow2> zC;?|eD$$_-V+5ZM&V_y-oeS*{J}q);%f1Z~nfklLO+6AFY5-o9Du9n=9+mj9dGYgQ zFdrM+K9f=i)g#Ff@N*@G#J*b^e^T=u7|WyHg?`6S58_}8zEcYvCy=U_8uG=%2IZ>~ z_Nfj-p$xYI`HIZ%iq1?!kxEY@$or*N;gMxiUZP}I(VhjER92GtCEy70B>W;s%t&^B?26C_jhXWiqsx~_FSZFWfa9TyN_Ia6CXhz)o zMYdwe0%n8&$8vmYW*BfA3_aIoh6EL7@O4RJ233x+0l0QlS7P@iFQ;w%AkBVXZ#i_7 zWpHFyImTQ@W~oME#GIthil{2mth5$=S-T@20E$hMD3d2cPfI@vacMNIlO!?FtV(0F z(~UMwNnF%pIfm`T1wJXb;%YriD)Hb-EA|jw|I^Bg_5cGpYaK8~R4%gN!QSpfO+03M z%4kci%th9R6lDEX9$%mmPAFC`GAtCF+@-2e3O|h8z>-S|lIDFB>a?-9^b*fL24W6< zNE!yhj(`p-oYTB#OrD#x?l_k!7nyWicLtD}YAO*ijI_dV2kZm8h3dRT1GzMcD;Uf< zu4|r^%0;%;`Phis^0M)-7|6sMEdQHttsK&*eSA(~4W&9$VGSFdlKy7}u$FZUj3glq zC)|cKs;}V#qAgQ^wME^fiW@2yJ6#Ncl|w8CYr>QME;xBQXPL6J==o$C%nfU6oR!L1 zw$}OBux3U5ue|)&eG1g-R}5*2Yq)*oXpJ~6fLdgXhZ;BDP7B(mvEjj%rkX0nJ!tx| zVLj-_h6d1&de9H{%J{A^TvK=T&N_vYYo3C+(e-ra4g9D< zx{_D0Pog{1Tg#yGu!PzmE$v$%<^Y+_`u0y&);Di7Slrpx5YAE%&<3^Q&guSKe_2Wf zxQgK)`JCmmY&32aIed{q(qCe!LzUSr`DI=35c?(Z zE9JD9FwN3D#GB^D^5BI_7e$3*HP>jZJ$&sz*SI~Ft75tzt}UPUS$hRfJ?O)r8(>cr zF)Ytu225%*by79JQe7;h=dT0jZViD8W8<=+V#yUs-<->Mwt3l4<=QWnt*jmRp2qOC z+=LYD{bzbBa0cYrfy|swuN>g!{FKCcz`gq1)B)px$Wc#$=xf$2a?&J8H%nWG4Fifm z$=3keK;Mnl#JT82*GX&wb`j(7>Nr7i|xmcPai)nD|^0e_=i6new(W8=~1;kr7Ri~ zn@}_g5J50pn*t4yB>RH3%~UD1jxXbT-o zQMH006P7QIQc}PWpq)|K7S%s}9{9C`AnK9X&2dyi+I`&v@dW6ANV06b^aiZrIfznO z%@b@SBOv*D5j2nJ6cnG;X3E z#t)4eTCzu)Za@a|xO|G>mB}s$4?tXxAHY!@$}aJ9eAMO7?mhNWb^3;cHhvg;5orA0` zG7D!YguSZFMH*(fS3qkaO{g2v$H10Z@v^SbGL3)@lA8KO&OT_2$dF)&`ufE~p@wb% z-WBlIx2MCaW8B-hXhK0xjwhJYhUg_k1yF0h;z2bUxKeX8ujuSFtYvX=%0L9N`~MF) zK$p>Z>Eh80fM??vRRgR|>^4iw%Nz`2*Z!NtmrFsfaAF!f)O z57x2;eAPh4I$`D9{Zy2lxs5!8z+EW$7t*ngSleJXZcRbM?=$ zc0`7{FIxSF=oI$~XT1>spOGfpYy32<>P_Mfvx2$f>g#GyZM38**5r7+X^O1e*AQw7 zu%{Z+&M_oOCX?c)(Bx)K>!jD4>SmK?PZbAnIW$==XPNuBD6@PF&Xr3J0qKwdbc|(B zqtW!xvmcys5UudOoaci&ui?oWW2QR3Wk8gfa<4Ty8=U6ScJZ@|&y&K90V!Vw5z+i& zvj&nSWXw|3LWo8}#*J?Ykfx+hK40`>+7VuK58^!PWU3Z*Ri0}yq}^$bR#I_*O(HH$(S*sza4biIFzEDj zD-f3Mt!d#pr}s*qYhKo|O9OV(|xNh0Lc^94sz`gA-$# zZE08HW0s8TpzKtW@Y=z<` znVurs;rw}9_=tA&b)}zZcK0C!bx1+WK!tEj%j;*5w71>~x}ccK7x4wHm`BQ1Og16y z;{j-uu<$5r0M9r(e}%VRbHposnu(kgm-8IC{gMSz(s10{;KhIa#l`r~6H-d-M;;uw z|Nemocg?)`su$0wq!K%l9`Ui{4v|cr6R+VP_ZWtVa*2l{qRcFjG3yauB#eatUIS~R zIh)4Z5K`m7qmkkAj_4nRF+ zbI?zZ$Jvzz+0G7ASM6w8puR(f_j~F!$FuC1=1NAh&lxp%aWC|yTw_%V@0**S-2c!+ z)Y|PxKK|&V!c6{B6CGq6DYFO@|4 zv{0=elO-*E5|=RnbB&MIdzVFTg(mR)3ezop(^w(b7{Fdlm+Ve*vQUs(4lMaU_ic?z;Hd zF7b*@UCG-v+4g9z0dp37#Ii@H`9Xw zHQgyi$`Ft$5LBPupB6JbhpPdNH}uddn?Z_?5?u1!T@|cDf1;cbb$575ab@(=IhLdU 
zHZchJQ2l%Wfp$$=$v_&dt!*~un&)9356hVCY`Eof)aBZ9RfMF-*>z{%iQi(z6J}`y zd~75ZfnA%KcT{d`O|$xn0Q*r3_9e=(GFdMR`dv;>92p7mKr$NQp{OLrk)sjanZvI3 zEP@5fLagNKni29d)I!uJK;&7usDI*G5+~{OEA_H?j8-g6737!#qYk$IilRbcAk?F} zs-m#2s7|KH&A}-t(7K3L;LQM#C=mcrr)?BqjXtoAd@+d#Y^J9d+6j-alTTDqDDJWipjo5UKiRzOXA(4_s%xSW*7Bc+{kgcW#uF<+ zrUYe(7M;4V-tD&^H*XlNtQ@_;eE*?C8||uk?-_o#c&uUK#`WuOoM`yB;qTyW8ISWt z&!+*iaRaq;s7*y;735hWj|w8vf-LKZD@{ zx2qv2P9MC4q_pBt@>f6osfZ^3?Dt+PYVW-_`90RP+4%6rr=G%VPhiiUz@9~bU&rae z!(3d|m6GH{@olcx@!)|j4a+XeVUkK%$P{@ELog;2(c&D0l<}1UPcQ7Q(8v>uG7#13 z$;QeT($$T<9n`u36$*h(>kux3Gb8FOMwy$Uhk?LJ+@UhU0J51z#$qbS9k2e`<{F-A zqt`+PM$*Hvzo~&|#?C+^_eI<`W#d<|!2LKqahNFf@Pv3hc4(ux(?+uoKfKXKvK!xw zJ^CFD1ktvr@BdU9h(xwHE!D^$)@@})Wu&BlCezvHw zs_08aRgJH`UOaPsMdxTDG1>{TWK5u5*yrG9oo8$nQL|v{meKV+U9HWmADx>c--2<> zJIZh@U2miVN!gjzO2B4TgBVq*UcTHS@GwSz4u`I(2h$$kh z`=u1i+g3KDET5}_ax#rg>CW7NpC?+NfQq*_wrxn{R(3b^PgU3L zZ5-L&QRuEimRs4=5Uk(TylzW-QC{=b;d?K7i>vC33LC16V#OcYRoPk*$q$d^Ma#N2 zH6=gbu2fbECyK&Yd3j~$hD6msd$k{-tfb;Qu%FD~*(ly&<~24js;VF_6!ak%VaJXS zb1w(DTh&!LZ~+9D?<En1Wo_lH+Jw!J6h` zu5}iq8|43nmE{1|f<>@21W*F+3QNN427A_RsG4$vOy6{9YVV;vQ}?cGEGQ5UB_nRA z=-gQ!AGms(xCM*hF{S_u{~&V|Y%r3{U@|Mdv53@ZdKE*oB0z^7k2ecWqvctt}-#g=Z^2|DH4xY_in1M8}F@tf`#eZ zrfwQa)QxplMheFKwObs6&tBAqz>nA_yh%CohI60RiEo(|>v|D-+|txgQw6uUIO$0w z3=OZLE4FQc$jTa0o&?;V>rijd7sJn}b*5Np#6+OXJd29+KoQ@Otwtb>J0%iTVr18L zEFKHRP@|IFoA038K0r8JXdty z&*_Y<(_Y+9%267RH8(eaj`Rl`0wuAMk~nT;*o=8c7;!=00%!VvzO>>7%K zX5?P8V=`2bgD)hEZNBtU@jlNBj8Q~4=Zy^Yba!=Dlo7flUC7104fpqY{N9051hU79 zAh1EL$RNX70E8hDI8=ZKk^_Je93^=#L#hZ$Bn^X^xT*VI!H7SAs%FN%2m@y6cT>fz3cbGQ4# zfw542enq0Fu(~uKzch4KR1UV|7jI4o8xoBtFvlsZ`-iaZZN^tgkLzm6Lmz^^DoWog>NDj1ioG3yTFmv;ss_&WqC;vv>7g)lh%x{D+NZ3vwkJ) z1K{x_xd3Ky6${eRZhjI9mDc1EDvSNex4$h0 zljBoUQ^h^IHZ)yVSG{Xs+o-68vH|}MH*M-FivmC8_YF%tRD(FKuV5eJ;ySZPAg%={ zh>EEIzKLt8iK!Ef62zPcvZpu^aH~`@l5HBp&hSV`k;2yN4yX5mf}oQPZIzu{8|T#F z(&pApuKvI~FxFug{Vdm$J`RlWi#ARYK4LkIdfv1)uLD)2 z;T8}g*I|V=qEawWHTg!PItaUy)bm0IskcdW!A}fyAbm*d&74mLP%f2;MkU7L)iEAj zrTR-Z5b%ER0+BQZm0*1;O}bTQ8B`xuy|NG{y_+&X9Ns~GFH&(rwj|a1u(62Wq@%cC0tN-w8|_MyUa7i+GBZ#?LF|)p^+`c zab0)q;>~?y;(_EX6~%ZhY%FXv#+T%KD~^=J^G z+oCknD0hHzNmNL4tDt<~Vp#T1S-p|kX<{(|SZ1o>lTtiizokmB~Yu#$V+gS=)=>L(otJ0SMw}$)ladY-w$JCaB!1% zaL~?JFOrSxr|V05TgsFFDkeL{u$a8LWvs2#GwRK+YO6jijwSEGiryn$h}VDW(q6IW z(q95=w3`U*QBVN0m|2D&xv6#z2f3M$8NATF_+7gu%B?MX#D+k^*`-V3PI1ojYd~CB zM9cy&&WG3HH?4fP^t!CUsPxJsr-JZhJdkn;(3BSmHFbi{Hq{C^+1sI;VrpD@ZXu2w zA8jtGK_VxCK}V{*L!NfA9sZ54LW9 zci+1`u?Lb*o;V@yPQC=MgjMSj3*wbnKiY`*n_)ZRMngMm`(Zs&G`msO$usNF8;gPW z1hxkYz%9fLkEiTVrb_#%a9$aQ60}bslxC;<){l;ijIQr9_Z&Wa?X`yw@9_*?wQ)ZtMVUdfs$r z3`lUQ1XJ@O+FvCYVO&W8&q!qdNgO#=v$DK4HiS_N}zEF4t2}hP`2?#$6|L*PJ)AEgC zF>My$f5io0(4w&7yL9yl?jS1J!(X4yg;GNg)KEz*!nAVa#YpWMt# z&3>cYqa2gjFsU&IV zBCC8JLZDXrB!ZN5bx|2;=zvfCc}olK!~s`*zaLBvyG#2aRzjAAE`|3nu`&O*5r67l z;52Wv?%}Fn?6B^=Se$*Y$z7xN- z?sMUcR|51Vd{BcLcZxde9_UfL&AJCeE*`V)y~dFEf_3jRI>m3Sd%tlFyu8&o0i)G( zmvtY+_YYh5A*04)x6d(pz2(;TxqyT1o9EBou`qYymh*|$qiu-;x6Cb^JAcR7+0MlF zGe^4l9%^;?&a;^FEN0C%_6+Q~*>BgC z_Vo023Hgi!MoD1h7jQj>(N4>?IEnA(jpO)t>D}pG=J%u6Tkgnt{JVhBn(?;}&pnFO zPZ-zWS*PTuS**u-+?`(p3*Bftk6Gb=V7mFgOCQ7bWcZ&4wHI+JLCs0;p^VIdz5;SF zrCwl=0{EvDAyTshKBJ}JugZ*aQ2$C~uBU^E&{m}U#Wv(0D+7U~4x zb))V^FSwpQfZ>2Kh>aUIMvV2w27tyG@ZBcluQRa`lK}KB##Up>m^QWngs%er+iC1F zuEr+rG4|rs_hZ2h;>E8ut~0KOTz1&F!MM?Qo^g|LGZy2oj6X8IVEl>k65zgX8BZ90 zVSL#5xbc|rsPQr5KI0?CM~#0nUTyrP@n^=TjE9VSjF%a2H10L-H(q7@q4CGYdqMJE zYP{ZfjqxVq<&Y`=&3K3L0pmT!2aPWqUj$}7f~9-_oBTJ}j4v5~ZG6@Eit)F`-(j2o 
z-uRmFb>k5nhSwSYV0_bf65H?!jZYZwHh$l@+jx=jV&l(^e>A>leB1aJ<6n*M8s7nde4FuUIr?QdEg*K->B3eb8Xcrx#Q*?=L(Ia}rI;0rw7XxBY3_)-j5$nYUF)GH4r;P8zShPuu z3sX#pNwHaM5nIKSm=@c_b~udf5Ie;#akbbj_K3Y=pV%)B7{4)oD-Mcl#I@o&alJSs z4vQPajpBLYCULWv5l6&PaZJpL}0Q~yw~E`u z?Zz*}9gwJgPrOjvCGHl#FJ2^GEM6jBD((?46ZeYy#LLAi#4E+C#H+%at_-pYu&`tlX_&f1c@ik;0{D$~@@lEk0Y_tC;z9s%i{ImGB_!sf7;@`w~ zV2k*kcuIU<{6PFr{JZ#(__6qj_^J4r___EGh~J8f&;eW`cLf#>MgkAB2r#ik4vJRidLo`E7`uE=fv3<@F3g|FJ3G5DH-C&|YIb4vn19Rcsq-_wtrr&NgA=Mna7JAN z6GuQrX9F|x5}cSnF@I+EWbWppa|=f=oIZYP_V(PP&V6|EvH9~eM>%fzsJ;zsIXZ)f z29BxA7R+GgJU;Of+-f}!)Rr>~Y*o*jm6z~VJxF*~-v+m;Sp>GK0cYhUciNdp?g{5U zJgqy1Pw3moG>G!)88z43ZBFytTbz5}wj(nOzFY85VEg&GQ^#flbMg}0ZcTd5n)G%x z={dDX+tu`fbLtx2uAl1JewAnL`MFm)kH~$#b068^np5Ot%7+sRv$JPT&73(lcQmkb z=IDj6c64~i)cgpo4uv0D1DS7ej#4LQL@K4}s)&Gp@f3>Si;A++XjJ$Za zo;l&2J#!+s+j{qT>)m&&cb}J+=bd(|kBTg~ zJ_Pou=Pszre%0%oyyWh8mMiz1b00b28X$7s^&xOT4Rc;zd z4p4CR!kMGzFU*_=vn;>J0i$Z&-7*8losIJDoL!i|ZSL6lTja;E>VItJ+%37Twaq;? zf8off*%J#hx6VfA&fGe?aDMifT(Mla+WSw<-#T+L4;X{IFc@kMP$Vb*f(tWfjLGw> z@bR1Qo~LHdotq2t!QDIe1kj0>^SAL+NcO=^kZ&okbGORh@tXYo9B|Yb-po=6VS_Vs zasV~nZZ#e*9B;Qh-fn9=UO66q=Xm&CjfWdG9vaB;cJGPocfEh)oa;mI!kM|=-ia-d z6Rt0FW{%I{+rHkBz)bhK(?`z+j;PB~bvdRkv+8nOT~4UWE$T9-F3(q&lj?FxT~4dZ z8FiUgm$T~f0(DtXmvicJUR^Gz%dP5io4VYtE_bNQo$`_k3<+Fg-OG)Z_XRT!^vW;t zW{%BaJI~Fj2jv31fG_1w;TfJAc^k2)iu{nPXo~fNIyvfxqs~JMT-3$-QG%Ad&v$5y z{2T@4IW~9dlv?UsMQ>EES!c*x1+en_g4y)3lArM`nAT5^ntMX{^_otZ@} zVkLhla1kZ>MWLG*slJN(SohAB6uK#p`c=^x>)rv2Tt!#p_l0h{qgu|pUUuH~vh(RL zJD>Wp^Uf0@7KyS3bY8Y7g)Uh?pF0Iir0x}kvhH2)=B85WS4EZNeSwP{SwFhEx-o_P zHF_=uRuy@YzZAO3m;5G&1WS#1!L=+GT+4DHeOWG~E{h^_a*VK~WAZj)kvnT_*9xW3 zHLI-~%3V}Vb-u%ySb>YQ$shCXNS%zLbE@B+&LZVI)K2}TSVVcB@34RJa}GHf)#{Y9 zbHN#fa)R^f8Uo>wjl$<|nO{(7GB~5&BY45O&RSQpzrg)D$L4`S7H}6NAB$@))jOw% z!|?0zQ}aM)xC`6?inDu9^vs3RN2q!_!B62iO|bKwTRDXgm?w3ECP!vZ&EHlqzi{mM z?CCj4T1XrU@x|$xb4M@WVd@3)Ps}cy2G@7wl)|gQIYkE`=L`IWA-5yt#;KX}XCUoE znF`$i;&}M$(X;2y;ycw&#`((o5Pl6qKM;gQKtLM49C?2Z^6#r4g}(-J_iG_fzYY@Z z>!mdNM#$Z7hK&1G$h&WcME(xrosgT~4GH`qPPY2e=Bzi zzGnE0-IG@z^co&$OSp&XnmcgX{T!8Tnh90RTWE{ww`2NGQlF0|2j7-`SmSJ^Y;~+=Ya) zst5q^2LCOCe`Chn%vD@onUUo?zwlj8`x^#4_&9yWwuTM>0L1fm2c!T1_%90H#6V*= z7h(Va;hPJf_z$3!@E&Fk=C%L;4E}e$8~^}T5uxGM+}y^~3;;m=wq=7406KSlA{CHm zVQOgd-G|0EkK;ewjB3wXd@H|oao;k@H^^XYA+9WJT|58)P^Rzph5!I)Ayc|w6&riw z?|KMq-}a<`>wba`%1+oCdVJdl((uhA{tuuhu&;K8wx$5U^|yVX-(v=MjA)Fxcd&PM z0RV2lxllI%05WWnV>YRSlj%1XzT=xG`yUYBMsojm4Iu#l8BN=DeI&jI036uycNw71 zs^aXwbllO}=xg-z9RdKJ_btKzK>x=8c&r*5m>3vnK3^e$fmQ5%C4OPmu`mI|;y@As zfC6Ml^Z&>o-xeVQ>;O>z3ExZ_05{-!gaCI~#s3ulwPB`Y^xoGq*3;7vR+bnLkOw8g zx@pQ}^gP%*HZU^O(>FFUGP2&&Q?nT$+#3Y~D5_-;EUMczV36@94Nd#jYx%DQ9_8~aD1!5r-Tu@H|gUw9CY z0;DNN$aT$)eEor zCj7jm5xkGj(}w$$?~&ipG#PhURae}7XT}b9kz;Pt5JY2w`(mVE`KTZ6V=2Vx4#o!V z_^%DOvTY>t^EF~HggXe?n(BYeotoO2oo^jI?p#oIq3joQ8xbGg<-^Ndd#0|tC4Op{ zM3Cm50u^)<7R(*}n#2Fv^);Qa1&|o7KP7vs`-ktL-7T)|y0g=>_SffEN1e0lGxG|o z2H}^DlOKWx^cV4N5O;I-USOH01Ikx-Y&YPRjY2-?PlJ`Zl>X+*m@PK4*QWU{`}Dy) z0Y^S){(Tjcd7vb*o1WF=xA`g4+bkQl{0>$2+M3Q4+S0lW`kjB17I4+&p$xY{q`>N_ zN`F3P7Nxsl)1TR)t(@^5osy0?-7qB?alljceu z&AnK8he|6hRs#XeF}jn~ss&gqy#uzZN3Om<>fC^s(Cxx2P?--23C;SFab!!O53vmS zf*W%1@0LB9)oaMP4l$vmN$rQAR2YY%U%Uqc_7%KEm;j(WTr?zXmBd{vN=zE`BDAi)q;h9LCq{Eu3V84VeE z9Df;h)ia2@g7m84p5^W~Pe-gK?l}-b)BPOjIgHN8E6jP_tB&VSA=mY_1^!+x8+$RU z-B!bO{nBI1e@NYb;dstCSZO%&GfUXWMb`ot=n$c=APel=?+LI*z7K_Vb`QSbwPVfH};iBH-&duWpDiZxe@^yzx?G4ly|2nFqm^H%bX8VNwTXpPs`1ovA?{1(z ztLN2Wt}Yw~b`CuItZ%wJGq~Gu(tDloE+TBsyTg9Ubnj%nG`(xx^A(Va?H+8l8vK08 zPsus-ZQZu4u0(Xry#4RS`Z<+tR@LB(`P5a9Ll>%u&ip}1w`6#4o-`y)r@og>8sM2TNQRP7j|+dxc%A~rtpyUa81|tcxQt9RoR}XJ<@)m 
zEobAW^1j9aHOzm+!D!Vs>=*ma_lb4A)j7JGvtXz(cIP1XFsJ7;ZF!?FW8wS1HNrO0 z(*;o-dkAwS-F*y+uW6v)MW$^F#je3ZpIy?*r6Aq0{$9rlV z;ER8XZVekVc6L|cE%h@;hm@KvI6RhpFS(hh860NIFGHH1jdlO|?*|z8R^0^)N5rAkweqHld<2qh5 z%J$)P6;6nC6ZhEm!R!l))`+7hj7{{UgBKpItC@z5w!nAeK~^-k>q>PnyZ0gCs~3YV zG;hz%=kVI-J7qAIEs!>W45SY@o?cF4)*mx;kQdIs?<6cIjH3W?&* zrcrcADNjDHb%?TN+%eEwNYPbV$l|Te{eE=$Lz;aogVi<{b9w)Vvh!FL-`6Ad>7>H_ zIntBPAfBg9?qjXbtf?;BmazNkMXuevndLozC|Qj~(3uG2>53VK=d;WyvQJFFSnV_g*)sCT+haj(ixIKu24*KaesPYR7x7W@kl@qmO$` zj1Gb$r6BJV4>0l8e2w3lWd%+$2?ycd&A-w&y8KA0$p*CpiO*W`^tme>`JFFK@gZJU zFIP3^4}lmiSk>zJ&MO%HCFyhG?J3fsM=8)P$-i5cS8VxfJ;OZpeW9k3`=*RUE@o)h zRm7fZnsxV*XO@eE^bGg3H800nzQpphmQ_kQ3gJ3*mjK5RFV(mzZS6+0kN0N3k~B+0 zg0GDyFsFjJfct6)YdvOKA98&?3mJbbe*@n-Bn zr*T;ImijtHNYm=`u4611b_d>AtH*2%LHd0W8G!^`=myS1f_puF_O18Nyu(k*-}z|` zkV><5GN6}Kb9^)CU^;j|-=|QeSZfl9bO}7WVtw2&+$q+%VOlD}Q&hwNd{QrGCT}CG z6i5I%@pXPfWhgywgX^^ra?Eq!lK1|Kwd;u*|KKSL@oz1Fc0F0g_$b&^hcdBS-jdA2dl6Z{KALPN@HiVj17orF%v8Jl780XW58iSn*=zf z7a7*lBF=`}H^p!WSJ$vXYsrK+;xuBJkMgEw>~}iHX3Eb|C77zLM~#~^s5|AgVi=1z z;9O2Bwbphetw+~O!)N1}6I$3p;#%Q|^xcj@v{CGs8`6T;!2|3#E=^kL3>qwz$7PCY z!|0J~goPX&hhIJBi6QacT}d}S z*PW(0OomUBg|@jM&z+~|t~wCXUZ!(S_cA_ipJ{99yS^O*&p6tjW^B$D8amG#FP2^1 zB~I&ceXX4skNW$hGrO{Mmyg=6?;xY>#|`N-m%&0FcjFpIn}eu?fx!j!^2sEYKHPw7r}@r(}&C3>W?oE z`A=)>hx?q~CYq;tV7w;%?P&45+xUgHaHc%N->pYl6Q_17Tk^$?izX4cF68PZsZs~; zVy<-`jCX{KtTk)^pcNVb4KOCIfh83ot^trb)F6uy6W34~m=Q;+YSh4r8j~8;z>td6 zLqLLo($+)4{}&^me#dyVFtDN`nssngpkhiQ-zDInwEsEI0Dw3^;2ZzN0-%@QuJsAX zJpk|p3Hto?_?`d!di;F<&%aJiiBX?2#~-SGOF5D8n1Qy;qhKUu!)*)^r<AQ2qW*$W<4EV|>22fb z$y;~%sqN}6s}T5g`V};qdM;IF3OtGD8|9a^(k1f*_qQpF;k8Iu4RmuPR(Gm7gic-M zEktBi8YUi{r*;jjW4+S5QNprnQ`(|ezC{El|08!w2vYhWJe=B?bNrY=@)9ETdHtd; zB|kHdmkoG-2q>sdU5L%_JLkutMsSbXn+CADnaW z5GMne;m=Zq%n<#OrNYl!5X7SondMEu7k`u5*=}W>)8HE3vE*CbJBcb}tP}A9nO2;* z&M#q$*i5Y}v^Y%9qLIBxHX3oDS)bp|)T5aEV@~l_yN{DeH5mw0%Nx+hkiEzQx%^h@Sq68_y&5q^KEh7uDwS^(6p4Oc3x68Z}c9JOl ziSo9T;;RBj384#^_lfJ%_}o3!!8v;1&eAd&-P{W2uF)N}40(CQ&vrHIOcaaMG9NQB zA08IAzw&c)@|?5XLm%y1M-HlJz7 zx@lYSm8^`4X9GNSrT&^vwmxp7~}KGykxO9=3ELE-2MecwzI> z+=&|KbOWwXWvswY*5l}b@nfcR=Ia;;l!chESZ^B&1y*KZigHCWRG!2{lj0hIP9ch> z?g@PT^j42FiLH08boD+{y_k5-5&5AX+wk$4(P8A*jJ?1)_;k~W?yT2+_;ieCO{tA? zq4Y&Q((!m-GqEsUTkSuHZZT=Zsetz7*DB2rGsFSg)$>f`q?*#CJ3ve&XF3_|*ww z2m;kR7wWg=rY{0GQ zn!?s2(ZWd6c#11{5Nq897;l9A=%x#ZXqH?g8`;ci2I4;jU98ull@J#GV~PM;TyJ4?fpSbL!TC?V!`N zv(mj!cqZ9el3kdRD%7gYmloSv|)Oa`*rfNRJ26IVt@8Sl{2?3mcJFp zh;hwEakXs#do%yEQbl(K9>wG5-rW3t&~?{yF%rjzUNz@-$g=Ahi}FOX?5IjYmc1mk zPsu`T0j7OgPxtQD{O=|PYKEc=?!ImNV5xW6KQIUOKMc0XN$xl;Wu_O3jb_6$rZI4p zWUb%__e7sZkFG{zBdSHaN9QuPh8!8^N-Q9T=uu+cW6dN}VB*;|l`*mi#8sWoNyHM! 
zJ)onZkCX}=&EQ-2eoL>vsJA4V`RFk1d4PQuc__c9#3@M#isc51d-h>LZH7>*8qdWI53*Imy0Zy&M9UivF!pbOu#C2p zRax`s+h;AYxW&`7pF6%?dbj*U(g}rpuR1BvelUhFf$ugvLJ#a^gjv+dyZh;WRxnW6 zDk0@BMMs=$K$;A)*jYXdrvPwODKFz%V@NZ_5~29H;}C%ad3d+SAJU^2H<$rEaX5XG zQQ^I^=d&c?kr=|>C@zq*d>i9aFeJzx18m^?+86wqs&&utIU4AVI3qP5-Ui#cZmy5N zGrmselKfhe&h=QVP}6I>C4PrWRd>AS1IR~#ba(qNNC!#IRstEo`UtaZ1e4s9t zWK|qWl0=3i7X#tOprA(I1PhgnA2JQ61Jle?DZv~(pGDyiF^$?}z~SM*YJVF^;9D&q z6-6DWagTIh$($Kr6NA+75fZ@-PE@5rKLb;;wTf6Qsrajrd@7k=yvyWVtHGz0e9JB9 zZCmH{hsnFYujh;?8a-AmO#MCQbX}>qgCqHuN=lToIVPmu6KbP;n0LB3lR|d7e6kuv z!u{2rR4xtmXF%C)1RA4X>+a$qMyj;qV3_g;7>TZuPnp%u0CsLGWu>_ z4xma_P{wgXHX`>l`j{y~pxn#<8nD|3uSnBB(L$w<31b#h$i}FQt{YJ?^tsh-YHm!g z%hk+~%6M2bgVJm9j3s+#?YN<_579ztnUJ3he_kvJ*5nTtOekD9=+uAM8al#onqX-u zuYfMwH^>OzvMTo9ku2V8pAr>a!6a-t9d3Hd_rIL$pQE?MUystD21OEwt;tCU+E(U@I5_%i_oFn|T z%ofBNT#}lBn2vK#1{OJz%Kx{Ne+ku3A7+p}_PosdF0_U=s2Ol@C{<*%U<6>Z(ATvl z?n6DZUgBn~%p~!H5=aD;hKCvC2^K2tJG7HWIC9J4~qd4?pn0<=-vCMc5M!rJpqkA1$$4&()iInan^2yUk*xjcEHT|_LOiv&| zz}>;cuO&8suK9TJ^^DRSX^S6K0|zE7q24@IBBfZogR%-oBYiZi-uPp)F!PQi4r0{1 zb=|q?1apqPAYn8JG#ZvVwjlyICFs0@WU4_@h*O8fYQi{*+A)O#ignv1t-L@k8%ITb zXD%+auor4>hX7$P5FgUBzVwc&A2A(DOZxFantVO$P&BipUmDz<(RJPSlq>l4xvVR> zwuL zm7WG(Y^{`9FEEmfS%n|Y?oKK`1UZzjW)UHA>LqOu_a0C0t$|Wf;=I zQ^B#Zk#l%`Jls;qPNulja{R+rSY>(PgV8H512+E>^SZ0|U&Xvj=q8dVp&xt%F4(o> zkA{3Q=2`$Feh_tZ>Xc50WQlq>^H9kPHowUZvx-uBMC2z&Cwh}}M{^yu>q$m&j^%0E z?t=jk2zbo=$zsb^bkf^i5MIL#8JACmpT#@(x~*Qc#42-aXx-B;+SA@ooVhXMf^?J+ zA<#{uy#!V@0#|$@su`hJ#qfq>~%LZnpr{k+Oq>yMMTv78PI?DFrj^! zmEL+sZyrsX8&-e4H@&U9&bH_hwIKupOr%z^F*KJ9h4~qkhYB|_#^je)o?n2FSFmC0 zAdkVb1|1Z(fz42bnN z%Ge{2rB*jb*2d=SAge@_(TKqRXLRTNELT5klRY&tBlsBeSBA2}c=5B>8LFzYap@@H z2(?l$AE`Ovep511)}%PJppJH1^FbmC?ks3@cFrRIAp}hIwdo)JG8o7f>$P3|zV9S5 zdE;oW4Bf74I~$9%Ir|*}mXbz3X5P==u08VGdwlZ0J<&|e0|$mO~H*tCC$Nb4nlwyy5_&b0Oh~`9)yS&d`I%s_;H{PE~{bv{%vxtV2 zh24hYx?D%f*`_7FGIKS}YdGlbVc+$G3NjWf#@#|xGNTua#9 z-J13Cp)B={&lx!PCrKu<$P>tKAqAa+jT8*v z5|5z9__`}*^BmesGFeF|Ue!vr!fL=W`R#O2@txuca4E0lJ{*fmvpysTUUf5OWkt3@ z4jsY`PpkR*qHz)gxL--reGIrtRKb+4hLbPMRUA3Wf3ejQ<5#(Jy0GiPF<1BO*@4d&s1iV=~h#q>l}3)g6W(grnY4LIl1 z$tCgVk&iS7?T&s>3Gc7jJ)&0zx9AkCcyavFm(l;nBbICB8|Q0eX_PpwiHZ}(PJhEb zndo|(V&-JRg$r_1A&7CGA+mtF6+)qmsy~=ZD0ig1grB_u0Aq2S;^BD*@e= ze}Xb?k7H|1YqTFsB&$S>%lN`i95|ry)ZilMxHtLdGdELfM;N8aiB;V{jKdHO4{hMzu>eh300xwuLYzQ)2 zK0hBWFPx_H|1pFDBmJV+c;q=ZY9(`t2K}t79fr|Jv!FGKsRZ5mOtYKGMwwnCDLhaT zT>j<3f_s-i9EJ%P_}KdnDxs&%q(48uj-o#0K6t;8lJoA7@Dy_ zKf5Vi@;I-J!s>o6=A?HnN{shS%_+{^Z92#G)UBUBBEW%MsXeUNf!Fd2t`+Dx0YJDi zM)Kv378WX4xB6ZPNd1GF-?)I$`a3uxmqg5eqAh3{rln`GXd-#5$+n*AYD8d@RH2INpjUN2I5%la`yAUX>I@TK1`s zTjeFA{aLH|=g<4WV=slEsP3HX!UgD!#xlY|!gRc4>_wF2pKj2g?HpZxaUBLu4(E2! z%a(Cu(rHkced!DsaFe1h*~f4W!8sUukkC!zv~~rc`rTQbSFC)yS`pZ zq5^hn=XS-}@vH3UE_u_D(4X|-Dl#&LPd2Y&;-8=R! zlLLWLj~4eDOmUgIL^ztclfF0G)|b4%(zq&J=W)14`;Non2qj=$gPWh(;ky=fI$d)W zHGY?Oqu+SMN5Redu50rauORU1VwwJMcecORVQ1#$O|Re49Nuqz*cFzulKqWL%1Js@ z38dc63LUHzOuG<!$r+jcfrX8{KxA;e!>37lwUhWYU4r0*D)o)Tf8tWp!+entE?i{MFEmc z*ZY~ix!~r)>?-`75BNS8<8?9Yi^IG=H6_}OMbFWk%1XZO0i6NY#b_`4!evZn!5WQ7 zZ3PsX43Dilgd!4KkW_&wl7KushdfV9$Kc_e@cwm!8O?xF`=6xg`R)h!N%{9>`4l|S z&7~t?Wf#kN`{AK{)z{saENjH;{>*@ZzSBrjRHq7Dw;rPEI_0UhkE>>a=7La|B&p zyF_tbdxrYm83T7O5nB#M@$}o;Ugz*AO`C}@-Ur)>FkSE*ce(K?Uzu};j_w}bP<{QX z@ls|YOKNTXei($K3Wt-M^C3V^N~-*XQDprkIWVt*iNsU0&Be#$@QlG(ts=kC>~$?xQl43|lIZN-Wof)8>pq1t|2gc# zCvdDQ+s*1V(0656IP)jX$ybGg4t^cVhN@PGQl{B&X%=%U0OfLqou}1AMPGb2s}kC| zset_zESNe8TDi1nXok+JaaXi^*(W2O!BTn2J@N`lTBoy#M);^`9m~Naj_A^>A2&266oX|V?&tLPoZza-UC^A! 
[... base85-encoded GIT binary patch data for docs/_themes/ceph/static/font/ApexSans-Book.eot omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/_themes/ceph/static/font/ApexSans-Medium.eot b/docs/_themes/ceph/static/font/ApexSans-Medium.eot
new file mode 100644
index 0000000000000000000000000000000000000000..e06fd215408b736e671061bf0d89c3e1300f3b78
GIT binary patch
literal 169448
[... base85-encoded GIT binary patch data for docs/_themes/ceph/static/font/ApexSans-Medium.eot omitted ...]
zeVADiqOD;TXp)`+$0Iiof$j!rRAMM}bR+8}%heC5EWo^G8nuHIYfrKeX;>Qi5O_|jUA z1%+Swzq&?dGU=IOw&J%Rd)>&M8>gmGNEq+RJU8FQyC$tw>+|JkX=!qNaDXSv(N9V% z3-HCD5WKHt3J$$)=oP-ys%S|nQ3A$1dAazsvlS3EX$~{j-F(yTcwt(sBv*hPjfMuD z<+Z%tv=e}USS`1w)Ra|~bv~KHFqs}WO0BatiLhfDe&A1cYOZHSfT3OlcDjg~w!q~4 zTrE_7V7+U42qs6Or1#%A+d+tQUoP6)@!DI8CkKAHeIoPh#|=bYe~LdE=skM-)Asno zUd((rJKlNt#>v7$BGQ{$Q*_paTIAos+WRb^2+ExjYth<*vjtuu&z2K^T!0ix z2^5@Vd(YX#$naIzOaUx!P&l(7mdHcE519Z_8JCgsK?BYH7AN)f4zvt-=c8UU8R9gt ze*cK|OTcqjDaVT=g?v}0GZ~Mzpw1>>HI#@KQbPDT+OcTE1>okO(nr1NLD zNV}ojncA)%hc*-y1qspF=R8orxvh1hPZ0uy>{wo@?ZZkL#$CFE&_|UKwlFwS>?l0iHk=)v%!WeQ$>HIpbnKZ|dP;q}Z=CP!oWF5*-(ugD zJ0Q|d?~+MiVy%_G4%)A-{O@?%h)$aji&t3E9aB=dvA%|%n6q_i!yKQ>jk06ne#XlK z6QZIYZRt)znb44+KE0fO*I3ZMYYp>7Ds%k>#P zoh+@MQ9xZLk<%H(OGv^g@yNRZpI*O|oOX z9D^3^0VxFjg%IQP1flhM9J>_=n+2$ZJznPOyL!(c~?AA9L2-;g+vc(`~LsKfi z>p@3-J-pRXR1!V917IqZ&T}&_UetaMU7$jl++M8ZW_HJtIl z*TOEhK|aFY5TUuUb&{&Xa#r5I9RkB4r;O1p{v5(?+&W}^uN>dCIyW;u+SilpXoa6i z!yor#A-EsPFl($>-dR9hiAex8rf^#z=cfbW!U0vu)j(r$5A%pBUzvdA2!&g zYDDLb-M;=~`5gzx6Yb-Nc0S`f8*foH_H+2UfpD_3cXn64Z)u{}4kh00J z^(Z6T2M2Op9c^`$ph~^SdC(UH>xRmVyoU^2#})!iBNL?42b7%I_Frdf7Vsk!o?c1XnSe}d@o8l60? za7)3Y7{ZV6Lkv%MM^yZ~&X?8^HJ7Bxq~@$4O8DW#k1D&;aDmmd8dO)3;WGoUE9Kh| zFV+sKZ=vx{Tb4lo;HqA`k|8x3V2!v^!pe*0W56tg=owjk%RK~MyHbPhAa)pqFb{1I zG6OUsc-obCPHSdRWz5^cFvhNAB6w7$LFBt@ut`G4fmgo1@lL52@Xhz?w{6DwOj(~Q z$Hzx0OD5WhKX7Ep#bMC;NGE7LC25^57G88xklBdR8T2t*+u1{@hQQtNXzwep)OG=( z<81Lvcse&CbDizM6J6umklg>&xZEc;TuBpPQ+eSAi>Y`=Q@W?grrMrbOEDYRQybDf zm_!}bnUkZb9xG;8KghW%a!uM(Yo@?9(gWflSPs#Q?lA+qW{75V-)tB|_pUt^I6LDC zb(|5}fchzAkNf)#H%pmIqZ{lLWqI&<`)4JePs4xxVn!x1x~(^t1@)U4UMIS3=NO_~ zQ=QO7ky??kYfvQQDQ1i%i?Th*9Mx9oa9$($t!=%(jO>*um8d3pGD4PX$AAmKB!iUY zV&qNXOba>&%5#`W0(;51P@~;nI}VI=cpoGwa>oNC5jHxR5Epk593M3CV7DO?}UZ8d)SOS@Bpne<;RDe0XBoPYAKSdJg7)g3n0y@n*7VMv$-P!)BT77O` zWN_b*SD~{X`}C(Pzv0#DL_Sz~T2C@!%<0>)iHg@Ek1p*d%%l6iwvb0x8bCZ*Y)!UXtCw&z3~IoRUe4=;!f2)` z4IrLDEc9hIXfiXMJ=f|D1H|an+FkmC{d*Y%Vp*(8h~n!j>fF*5^?30FywZ;Px-DmQCI%DG6aPxjbLN{La!|oU` zK}dtDJEbNgHA?ykbj2}uDA?ekHmTsxf7Wmex~C_&COCBSeHOWf#)+Y=?0ubC;%!@? 
z1osS5&QDv5@F;d?GO=i8F|#;3Ju=jb+ENB}?4V@U@`w^1lzFTz4`vs~&&dAZ*-g$4 z!7&v6OP(P{@Jgk3V z)PaLr*$bl;Y$59^@5XNMK<4DxCG2uH^D4LQYDM1tX;fV6@9k)VUlH-(a`hP&yFY~s zF%3*=9_FAysTf;G$EmZ}1F%S0OW%%;lefk+#9feP#|_50S^?0T>-gtBQCN`n&0(ydUIhn87hdb$O!e%UKPvxKBo};ZQ1w8mUdv6(md}nUE z<)*#mIdI0F?syd6$w(!c;h$*v>&XLeN51nraEl$Ew*xJQ4)kx;PcG*hm0% zySYi)H-KN~5U>}*ZabL5gj=ZT20dta8-s?!5{$!<0gSF0n6P|a!Kbz|Fd_m%VSvp; zV8_VVj+{U!m+8!3`m5;A01Q%3nO@WrO-BZai3u)72&A^r*tIJ`1@+b^Vb~zg2Tjvz z*8Cvg{tn6y*tz;emUau^za{u?(^)qf-a*|9oy}46Goo2Wipmb9ku9Kizx2lsLIh{c zN*QTL2R(xeON%nb5Z-xX|5!)`(`oTMbqF3EEv@hFU!Lsl9{6k^HCh-PO9uk!vBAP9 zjL_4`>FZa*iBPyb6#3PI*Nq>(bH}~UohyVQoo(X-{H}W{`L6xpkiDCy zp~OJ8JKG}8bq?h^I&wpu$zgsDCr1x0_KQG!C>*qtp*;tY(xUVFo2OP1qP3?pJrwW0 zWd~aX1N)PySPLyRu_}-i=YdXNSjG zDXx+2YQ-AlM8-+?HO_(~u-9-EOtKvzdcK95weG0q#Wf|MjEe7d% zYLXL^d{*YBOu#Z8Q;sJJ)=V%dJ#>B$wkHERlWl}t$M{BFbry7jx?we;Jr!wO-25EU zFW>B`z{o0h5%dG8g~I$|$x%QxwYQ2x8LnsmKxxgDqLVcR{2ppcGI!S zJ7G2!*S^EGhrX9%4#Zq{CWYh_$V|bgBAiE6*^=u%lZ56@PzP(?GuI{xTG4VWQT6g% zQ5j+x3Yqtbwn`rNX5C;q;02-V*Cvq24Q|=#J`L9#9d6xKPj$NE`8d_MnBg>0fYU>& zD=Z7BEP*G4kU>ljL7M>597%&W%2lZsz)&z`@TO8jS!yKXhjkWNIs+J`o+6%-i!B~d z%Y4TjR~~#&Ew#9)7rA`;_S;XNan{+s_Q$}{9J85Me%ShIxw9uXR2b;b4fhP2s>hBm zdnd=aNdRSk5hfu>SCXQE^htVVw@qkMI5yfBE0Nu7vN;wVnt7SDLn zz*w$&tm#OFL*bk}Lo_Vnq7XGr$_l%2*Wy^S)baDGBunG`@K>{*ERN(EKgN2-v=60u zeIxo!^6)W%HjE_aNdo~YwspxSl6XHMWfF;wb*gWs@cDGk5!B?i1o#@XXR;n(OlEM< z_Ou5 z{=y60Q?!RMD`dWZP-;I)MTQBsxj0>LJ54ns$UD-oGq|2 zAp*#67cpxB+^V|Eqq(+GCooct1@flS=X^Pq#M5(`JrE!xR9C@DXkEcYf_%H{Mn?YC zfeEi6Ihg3nB@ARSu@53!J`jwgzw1$C+18 z!EMX$*`kNd!3z}dx5Xj)DQ{DmIIJ7T0F99tmbZl&o?|emYYy#^!X76bcRAf=C})6Q z!M?^mDzSW39|jgT2@}kWaXNLy+db+a71od0TUo{=&JCQ=o!@u?O?aHVYfx;Uzx?y?yVk4b>lrxx~}Wy z3-~6R^>tmDJ8PfDZkQ<^g$3n(6+&n-SJs&^XHJ>wpz4Y(!Cc``AQC%NL>+597pOR# zD|W}}*l-|J|t!=?GEyAn0tvE-v=p- zrgey$NVX#RzrL^%F5fRUYz%NRs}TbGMkP=;=gtqh&3POq_y8Y-Q&pJIw4@zvVB?mI z$wML+ZVV?XrCpfNrL^lAP)6m+D?npB-DTI+?JDnaR#)3+yQ>oy^S$l=6H}PJ#3pY>ClwXvx>8RjWKM5`K&44Qo6V@_5a_g#)cjv=%QZ20?t6=)fZB~I;RA5GtA6i` zt37-O?@c_xE6eQ&Lic1YbIvcb0-5Vt#)zuz!|Cw8G=&e-3T(`~dq{x}9%ms#x2H+f zJL~pZ0QKJco|pN)VOB-?LSoXC&iQZdTLFm%;vB*|#jKZo6s5fZ*maWp4zRKdg1d2$ zaIkO$<&}-Yz+^?p`)l^Ex4Ip-5RH1M#n{-w@>uovib-&XaJbl*8HxdriH<&T%oEq@OQrk3`7kbD8WO?*Y3rOakEbtsI$4X$hIcmgjZZOCdm zku-9;G500!Yzk9z)tx+L3ANaZ8meUE;P>%PK+lxDk zBomOZ5U3&LC3x4N^^72PpRi^Xyj_t8yPGNDFI~L|7t4EFqPHF<}VdV05<=2v= z1K1_xa`zg!I1I<2^?{$-$R%twZ)xNL0W(97ghOx$$H?UlMI)CU3Wv`%a!IK`VIfqW z{UEJe&W^oPS-C`{X4e$b;aRmCC;7oml_%_baF#kzIXFVuiUuyH+C*OzQ^DKfg0PWN zRNhAr60~e=7{Y+Jl^oT|v9W{$K!#>JN>a6>K`0*NK zcbyhz?Yk(t{zoZQ-D|g8{U+3nZ`uP-d`ya}1}FEJw6n0b>3bLfHaf!SrK$)p+F6RQ z4(eGmpw|Im!M)`T*Dz#0OvDy*k8@#~HO8AGXT00aQv|gUmI^mFbzK@=Hq6l8XTMq` z(zP-EcX@;R_un+rVKa{Qc{O4EL*8iqk%Jeo35=0r{Z}Gpb^*@|+YRxgM9aATeaCi# z_;*}9LGGGy;Co$xPS7lnPh2aivLl`@I&^@_4QwqE;zSFO$AmMlO*rmjynPyhnP9_N zqB3FdpU(U?>!^)5wV2P%%BG4`L(|TQ&3B~E>{I!~X8Y43V_UNF9u4A5%~=0f?q-e! 
zX1K_vfx?GJ)nqYowCC|Ku1ZDE6fdQuO&758*EZUuVeeuY@LAO$o;{kupz;YcJ!30X zJc!)XMjM24utJFd_%cXgFzf(b0h>3_k6Y+$Hi_6W%JQ2W4Vf5)l{7xM`F7x*MD@8% z%c^Qw=2dbJIz@4z4k$BR&=NG)2@9*d?{Tfy$B}x)8L$-RObF^c$gF&YMIT(sZD$jvDGA8qYVokvs#p|)A0V^Z!W%h-XOi-gL$x&RFJ_WAo z5~K~00k@U+a}fCn>hqPUfF>o9KnC+jBrCdipt?8kJOL-$S*=*2MlkCubDiP&jYcq{ ziDYO}256k?tfspYn-{>nR3n~nD2OGFOaU2kKi&a3g{x-(>BPx(wuNDgB&m~~C2Mz1 zyk5oQC2Mw1Mc}=+W&_=Pb3WwUk+Ib~m*!@AbDarZ>@c4gg#~3h{VhAXARlLO_BV{J zmO-GknRl9Yhn)<<7`YDfhk}`Sh`XkfA#NR8ZMw;_t;7|gGakzigER+i2e*?B?#rP2 zXKphU9Od-w=YiWW&mVK}JH9yS_pQm02f{N^rBY7x#$!vNgsuWlXuFKZZaxMsac26X z)p@_WlawKRslZkZbn_Skquh#pew><|oDm`qQ0`C1zm?H|aFo zeeGDwwg?LFfg?h?#*#_z9{#PeTq?r0k$D%F@UD$|!CcC#8K((5fGwg0gE|sy(fmXz z2neT{b$bLgGnST0)4t+j(_Ev4K^6I_Gm|0~K$hPK1cLt%NrZiojwsUMXBQWy5*y|n z?eeB7O_k0b_6H)ZUGYFN{5#=vCK(8Kb#+DTP-|Og;~b>bc}s^+?41dO+S=NJ@qvLH z9X$fMGY=tmrmzlM&y#l8!`9)ciIJg1yJLqP9R<4`i^Iediy*9#LBp_`g7bjlrPYL{ zqjs4GZN9E4m^UewYYRiGT3hr)RT?=0Oe|Gvi$q?sLP@jKlb)onSX!Wsv9P}`7d8>~ zMS}x-QyZqBG<2L8-8@L1J&`qszf8VzR{7Gz#HJZ07hSzVwcD-Ec+qT-pb>F!Mq2m4oNdd{A` z@}@Tx^YJ{=TEf2D>KE;U$n?;=-upTrOF%rseyx?ao+x(?^mgk) z0tz+Lf{a2s=h!MYC(rl?*cyFA*7#bIVD)$l1j=Jst2ZY^xC(8*Nc~U2iLREtjfQ}w z1k6irjCVBHVrkN(uS^ZZ4=(EsZIB;o=K)_OV1n>>Dl(Yn$xMs4 zmpiAY#z$EVq`%kTWSkrXyIsHvV&nWHsI@EMyC4g^cNJcyQO#YhFs-2bwmpojtw5C> z@}(^L$CE_tO?l&?s_N+i!jAzw35Yxap4T-T2YUj;j}q3*JL>YfRU_+(@k4HY)nFmL zOm5BIri0SFVCo2a$8@CNKoMwkc~KRYmT*zYT$GI}WQ#8}!~FbZjRmg>_~)ZE>4{fX zqCK6dn~sU|pifnAo-x!Jv73CdX}$SLM6tN_=5g%g8AB>Wa7M61YLr#w=4s2C-MA05 zBW2(M1<0HfhL}9n=81{J<-}vIpH$b2<{cjLs%rC2FQiGNlcIUGd01_{mCa3wdH1i+#@C=HuKoRnL5;< z`t=4wZFG{rN1K(BZL}q#R99a1(=})4E#1i^piW6sZT9M?>i0`vzq-ZWz;9j@?J8qt zCeHX5;Djk80g(6R@@P9~lDZ_KjxIt7B`CWUbZVVO_vRyT00%~cEKT1U~Hf~WG5f8F}Ji353)#yD5viaYD5TJLoOmD0Eae8 zO~FY7b%7D^@C&vKI^Z0obo!DDzJd>bWcuc>E2Xmc=)JEw@wBI(c+GvsE?!V5k;)}; z>gqM|qRPXzsQjil4Z=ZQ8=Ye}V8z8qIUR(UgL=JA^@J*|f0Qp3Loq8XHDt@GrA9CX zZ;h4~q#85;OS#gG+5>OJ_EZAh9`q0uJx`FU#>a5g>YMXa%x}brxRnLaoMs#ZARa1>KvB*Uho-18U&Y z<+RLY9BY9C-pyw0XfO-BWsyY&Fgln`@&2qFC2j3aUTnG4m?N|qWMpOJy;54_MO)hy zMl5U@OH4ps0TEHNlm&-Dfbx!29Vo&h|FH6RxLu?}QzlJNk7Iw2Ez7rP(%^p%hN^Fx z4EV4>UHyGe{`&$(|FBKmY0CP&av~ayrT_(zc3GnH9Gl)8c&aZZ-yJ6kGos)x-DbE* z$W5I$_vFDP(r(hzQNJ+mb@<%%3%i5K+;K~u=IXLj5~N%ycC31BJ&Mj+Sp^<(UjJPN zJMtp422L2%!_GqBWl&2I2#g-_3)tA9-=bxZbsIDIJ^$4BD}B5NQLe-8sSJp3ilK|T zzxb27{#-8hm%}J{h?QmkB(NcCs3`k@_yu&wL*~Vh$kp|4{(M0EMef_*mQUBuw?DLv z{+$1ZuD;TKj-LNr_CfIx`$wRJm$2rO>kD*jB1F5|ViI>KrVC>RO8pWRqr}2~^(B5Q zzm_$fu!_)^=>R4qZJ1_a=?xNMyjeTt6ctD+oGGv{HGIH@b)Segbivs_N=@va&J|MO zaH^1--ai3nGs2E{i=UG=iJ$IT@X6vrW&wZS_J-%3eC`{bf0Aw!SqdKS0G(adnPmOH z+cvNNHza(>q$5}Z5Xraz17uM|l(xg_heE2@9yUuBMaDtp{s6kB>;E}vMz%MI6j;?r z2hEx6JFBZrfRay|?;GzB>0afYm?gc5t^%~Xj!z8Ud4 zOx4<;#GV?PMgm+U%|~X0GED+5UQVX3YgE_K-k`vqLXI~nu!l3PYEX2NmmqV@r^@Xp z?ilgu+PQE?(egrRsShn=1q!@1P8MziDIgI|jtUgSX;4BktzbfwcskWoV$O9duKt)F zi>>&bu<3Wh4rv|)*_Zs==ABEoE}tvg!WIYoI4mr-J>Iy75jYo$$zmZ`NG>G*`l0n# zPW;~ZeSdcF&p!Fy%3B_Iz!q1pQNOwgBYYL|dIQ!UaKgFQcO{-a_7{nDM-7jSbdSEWG`Eewp{s>4VE!o3Vu~{#+=e zh^S(Xk|k7FM`jWj*csP|)C~nO*O%zbKl|fdqc3>DXjkQ1e>Sx?)Rye%>Fa>i^ebXD zn@VNRysdZW;UV!rVrU_sT?oey3=gi1BiI$VUf9^ah{@-zXO$y;h(n0A!oFkPaSYCH zs|&eXL^sqDM%0-rR0dOPMG{UJ-2Ah(Z81C+i0cB`43FR`Q9byfOv$RmpOXgIWQx=I zXAkC<&y-n<@Vc4e!ayuGuuz=2j%5hTXO?p){R*l{k}+B>_}-@i87+ZsLB5gb2s zp7hIo{6E;oDA1Lc%86J^Sh22HwmlB7ZB#@Z|Afaf6iM@9;0$Rml*OydLl_D)$G5ui>!+3DGB8kAs&-~O1pNwn7S04D3K}N01cTd+Kg&~xtpL8n zu|&8Xh?!IZfojY?Y8QqJ#Cvw&Z)!pO=0A#b-X>;MeD8Qgl*AY8I{$KtfF0X&Sr@BU*{^3ZroFd8}+>oBB_#gcmh z$|NTLEkt_Y(?c}QT6BRJaP#1MkT7xi1aA^MY~-I|{2HbOZbns0EKN=VJ@JDC=0k>e zn!)%hi4c-3jKcg;QWtnM*BBegYbw83kf?m5ckB@F_`6G&{_YZY@{^yeL^=O^F~sLF 
zf1gz@NANSaxd-|j!*3#KOqFxBbBxW$F*LVihCO@d+~h6Xx)NE>!f%u5TsjASgDuf(br%1K zTE8ZJ%TX&z@q>?axM6=Q!OJKZ4-&+-wpb7$xT!eMxWA?NLBV38Q8V7dE&!Bl0*@sE zbY8ZWwSS0=YGK?DNr2ZEC{!^o`7#DJs*-;X`-uL84&PBo`o#Q;eHZTkrC+-Lg73wv z!mj)wDDyL-REdgRm5TFzJiB4*71ECzM!G8bb`D1cZ^CnhgvsJ+B2V>1(+s-ba=F}B zVMF;Y&|AjP2wTL^ApHg{U<3_bK_6EKz$!jo`LVck0B8G>SgU~Vfn@3YwT zBslQR<;c)rH||uA#6W#fGgOEO)Vk!oBtuQgdCg#?tOS0+gJqhqrM^lx1I|6t+Phw0z-Uv3T&pvi`&*@h-e?za(iaXWfNZl2|Mk>!xWi z91OrZt-XtVG;A_Lk+XJQ4uq~{Tm*_Ba%9r`Y6yN*uf}_Z#EDPEGz8yG)tZw5OQv9u zcxs@nH~rM7rhD50AG-9Xv-`#piSfO&;#j;p6?=0mm3!NJ#1C`RYbX{p1G?irywv`H z-iMn$Iy5MY^O-4k(^c9c64-Ryxnyy26WVzu%7v;gK~?xxDKKb+kv@$RbEYz> zVUr#VQhohK6sNyiS-bf}(|tJdth^X|+H*3`f4Z|1Y9Fr2edw|Nb2;9l>%Ud)!caE| zgp>^O1vSg9;Oa_YTgz((U_YhW(s?U`6lIVPdU%3Hq<=str(`nH>QxFgO+j}w(KA7og_aA-W(pP8qj&&5rp7mUDQ!JZ^JRC`6 zUjI??!|eFV;PkQ1PvfoV*P$FfM>%|UdaT%$A!1h3;MfG-WipA|5&8%eWr|pOPkG4D zA6rLNmU4J)Td*x&@LT{8ydq51gB=xJCINft^O<683NW+8{Bb4`rYRl%?)lZF>!v$1 z3pedb&dravMvK!6*?o&stEHneotgQQ%fq)9M^4S|o)edHoydz*SRBgln##2x%ilRa za&8Uhdvbn%a$t6FXmv6h?zkh`DueB&FtyKu27=a$B|i)z3x>R39Dc~KLPyS(_l$=f zBH&8^r>Po{n(?YRcHj>bUWUN=@U1Ve8m9* zN`=BY3EmgL96PLsXlIJYB5?0o9iqdXBTPw@Dx}aL?M&DcBWLwVI}>2(RYwj)EYBR~ zIEbx<3k6i$tdZi=uJ#Y@5Q0x9!akmP&)vHh7kA%%Pwf0@alG=O>5-9Xex5!L!ob`| zG51c)J%IIv@I}7|mq$_IY|>(pUR>($`a8LDs(~v!Cft6?cQlBIEx1Uq z6})!Dgj{H<{evw!1Xy_%>`v(3PkZ^x#SgDuga6B}#8&1)creBwhN6#KsYZM;0TWFG(d2IdsD9%v#%4MRP{iNx}xWDmT$KA%_(g4(F& zgJ({9+HpB8P;&^ zge@0&-QWpZDWj4&A|v4mgNp`&EW+nO)4G-}&mV%45KNx}m;};>ja(HNgcWdMQlkqI zI%^~y3fNV6@#)$=#Nk6OtM2OUMI2g_UTj8(rMI&OE3-pAOCD)V(vRaoSFh6x2U3z6 zrwnbHA2frmH0eep9WZY|MZwcaRl2UTaY~jJibYbVYq;xyU+8I$@$N6^7;^1*|Gw@| z+Pcr!3mNaS!`k0u*|FR1x`%Ni-d668L|_U=L2#Aj3K8+rXoN)MhKtWZNaj5qfS;Wg zHNc6;paf=xN}nWq8S5MfMAFCtD}9m%^svydBaxGm_PzbRT}k^Jb!FQYKM#OA0)nRv zcxc&uD%s}*=ge?RT%;3DS82>rcx0Rc!ja`}>?7|`uPSdASfm8*c(*?aJNjFK%uwUU zacDG(OifB{d7)U+ZhH4bJYRW?N8Ljqv+O#@0`pM&^Eco}aoh-l;#fJG%jE`hgLzrt zyDO7Kt{5N&-Tfg)3gQ+)4klzZ!j8~-gd0aCbx8w~hph=A#bihY(DCM_U7mdqb!P_& z@NV4K+f8OQsqV(F{Q>SoqbIvtva?|~Tz!}+87Wve78Jv9NAL!~WATG%y@NGfkt99f z4}tyyfpJhXL!Fhtyx~9|9_7toQt~7@@f@H>j6?Tp=7)>UhKV_y834Hi<+oQKuH|XO zYW7;g;vMB|UtiyFAK{wVJk-+Y>}U%i!q+)j>yT@8X0u*McvKhQ?I%W#R$7_-PF^cm z16sb|4fpP)0f6nP8wJ60a9znPXB&-Xc9#PYuGw@Tv(FpPj%iSpf6jx~)bFKdWCo%j zO%i3Cjq}1D$2%zihg+V>lg(Zaxzatfu;n2QEi}o@=DMB$2YX@vNKaRy4FLux?X}&` zKR)C-rR*jM&F{F*Ds!fZ1ypBxvcZ{-I=PA{W$cLaWKfGnW^$<#BBLX6^^xCFOawi& zw8R?ILrM2VK0jZO`MWkA!%GDe0iiL;N9*op%V740;ZB`3XHI@qj2Y3&^3d{w!o;U-DshWTGf!K}TqyIkVzA0|LDFq4g^}r>1xAoSxb_tPibi z0DzT4?-i3fW@mRS&dy$4G{+Y2guEW&yO2*~)`d;;dej(BX}sHd$`0NO^x&wB9jQ|4 zFKJCZbLP^!K6THzw~gKgpw*urX1)$-Yr*YBUNR&m%4ysILr9T0$VQJl6u%Tku#7^5$fst2EAzpmKsW7H9=Uy3IccE3H-&6&GGS=gY8UO!9u?(X2WcA|kE#N0aAKvg9nfK* z)v#0xE@-o^Gq?aDy^}|JJEC!bC91mJUhGVJc**TS_Kzg_QZ)jHm5*%JQyaw{uc!=H zD8dnBES`nMf+#rN(dRz*<7XTP=?Bg@F46~Xd-|upU|gi1SM;lGw!P5$;Ma}y#*ig} zVkYf{j@K)X$vT_`>ml?(Wb(S%ii zE%tBm+@csRGm})&Dvm(f%J+9?86!qwBT53OPh+1wzyvHn69I-74DTM! 
z2ej22LFo_JuOQBX29*?zvkC_ZDy?(!p%99!8!_it{a~0BHWX7rCW~gn216kvPUv=q z`u%lo10IlNw<8SED1nG2L{y181iC4kklUZb@VYtZ=_?(Fd3OjXiGCED<{b3V z#7_ztic3mQL=FT@Fc*~Rk9nVC&G2`}y3K?I58z#8}#WQvurK(i8)O|_T^>)8f9*^Uf|JLMU~!E$q{R~z(0M05)+ac<`- z1Dy2d@J2>~M7KAbHhR9seN#6I?3C02YZqEdf$qA>ctx82OOOJ^+|cRKkt4Iib6@}Z z<+EpRvqfvixmdh7a`4v53Gu;Qxq~OpN?rlvvpp{U5!vXzQFggQ6v{fn|f^+}zg?y4eL^`GML;%gLJFKa6XVqBltJ(7=C17dH_uVBZMn^ zae*gU8H_=s!39zIPnUk<&Fw#b;2F<<&1;_jj03;W{+9dizyD(P()+&s?f3E6V$8_3 zYe2RV=h2Ty&OwNpJvkH1X$Vqd3%MHYhO3UZhV5V!b|KglFyk1sf;fPIoF-W+>mVR( zQz5mS2(M5uG=bm3^H<+nSPqu7@MEmGr(e_?#d%Fxk2ah+D83!DHW_B76<#w!>Jz44-*aGCO-&b z%^HQ8m#}(i2R-K($dy6A?8_byNO z*-v>!AXXPsp4&OqC!X?5UnFp66c$Z+7p-C4e;eyQV*P13UdWT28B(Ni16=o|z8u!Q zA0ZNkLHJ7 z_L8S^J!w_>`>R*_J#HoR*wFp6yXRB$r*~n+I|kyzM`nhnyCaV~b`9K18uo}1n!PsciZlE}d(<-m)O<)eU=bVM_;43M_1qSZlQ^uREog*;NJLX6-w zR4&hQ0b;!5p}cRDjncqM81je*D6vo&bsv%;b95TXP)zPVcyKrWKKOzcJcz&I-N(<| zaqQR~XO35Hc<7a{e29OsQ1JU&){ik{42S2+qLeKyYy#LCe64{90k}oogahIrn&9MT zM~+#Qga9X464z!P;>!Tu6~%efU8jUOP4SezAztv+Z9Aci$`oi|=cgOKZU`9bA;e)O z=H}#2-T|O1@7-2kx$rePeU#dqbk~g9C2Ex_WQKt+{Nwl4hrnAM_?5<1T}SzYjU6V11S}oKx~5Z1)ySAr1{M zWMH77vkAwNP#+jSX}-X#RwKSKpp?iHmh@3^k>+Z;x*DB32;bD{h*w|PN*{UNWhj~I z%P~yw+MtIj32u|nZR&%{H1dt569d!7CUdI``QGWZzBh|ERUSW(po~k=lG8K@zH_4Y?^R4 z=YBv?1Y{e?d_8dWW5s9}pn^bETie56({}9JDA@slZ@H!uR!C;JrXFm%W5w|^xg1DB z`t#&#)7&aANp<`f>Lr+?3@6onZ)J8V(>Jvvm0FnU@15KsUepmEJ8=6#*Wm2o zn?^=%Iy^gn$4ic%`IQIvwlNtyCYges_i@lxzuilI$ zoyIt(ii$qdO7K zF5SAPeA`kslIU*F7Ba25wF~0IWV4+}6q_C#?@W&6)93c@N>7huvm?{#UHdEV7+$|& zs%vR}JQf=oo9rCBesySQ_4={S$+4kWYH^dpkIcw+#zOuZPE^zcH;a?q7 zo>t9PB(zd*2lqkHnuI;jxp3sc(zaJHAn}MfVS#RS!YaSE@d*=g93vE80)D2!0wUsTeKjnl6L!hJBz8JqFRR+d1Y15D5W_u$UC^;b%zE3d!xw%1)L z@f-s~CEu%uvLebMRmXpgLd}Ia0%85qE(AIOTcxIgS2N}m4WXK;X$r1@SC>q<%8EmH zqa!OC`OvVK^h!y9KmFX`KrEaAl zCl(+}3^tb&C8yyo;*9}g2cy{wmIQY~UGK&sVq-B6cZlGHp+)1ZhGk_=+WSCn{#m!_mMtWj$=$B-RhIp7l2y z$a)1OF;Uj56|H0rUnIyd;h&p}Zo2WmvgpPw=z!1JmE7+7;EToagBLGet~@HnM5ywk zy!b(7`p(aO7K9JlS_W+ufaxp3V*}di?~z8VBxtKB+}N`P#+@PU0hw77`Arr=z<^SI z145W=mhhz6rd>%|{C|%t1fpyr#kZO+4MjeAY#WPjeq3(e{D^h(oA3?nS{jeVXLhdU z2KFrG`{wr!eMJ0mVECK}gL`v^9xHOL|&ZBi%}ljWmJ`{@AJ{x1c1sr-@^g=K^>8 zyIPRLIrw*3?k6qFZRxDkh*u3{IeAAb!O9<(LFoSinT*V`CN66ek?aHJuxwQ%vqRev z$w1f29{1zz*yF%?o!G#is6Fn-SN--cN_(8GaOybIL~ab0vvjUR+9NVAJAS9*{v5q$ zXB%V^vU}pzA*I9r19QL_kk)?I-E|JM3^Rc!g_J$g2SS6r#JD!@Pd)%ChF%Ncu%r;y zBgYI=Tvv9Jwd$_?TBe9n5fD!IDtS(iMXy{L-BC)wUojT}>W{eL-dZHb{G0ru@cBNqp;0tg+9ET7OXPB!m{JqYyI+mkpo+o-E%( ztsyuBiK>7I16^O|LV`cv1&44!xR%3pZP|=oG6elG+XVK{S-pPKm|KLv5vW!ZvN^+f zY-Z;w$QFz*aa+K&^|H?5`=sn>fh}x^-6Wg$8NsKyJUOm(k=>GI-lpde&6Mxc>j;y$N7k zS9vyk&WtppecvU^Bg?WRYqe!rw!DqEBzBCQ#Bm4-aU#oOXA)U*Br7X~LV*&NmVGH* zC=@94gR~S%OW6w~UEnJ%kOE~ZkV1jb0)aF{mbF&EyCizWJEJ4mP_Wa* zY5D>wMMS!!{{$LQVMqB)-4T5$jh6-HH=;twHz9KB-Ak8tI;)V5#k;XDLjXodijm6(G8OC<1l&48={yrdplg^N z-Do&C09K$I&3HBiTpgU_AI>NR;^o$;6})-#-0qh zfd*xRNouJupHZ?R=iExDVh;+AKnMbqLV~7f$h8g}3jt_Qp#5Qh$h0a(x)JUHQIe(B zJ@YgqKk`hsnp{*b&Y`Ip!42K8!WARYJ#OsF!Zh)hi+dQ`%-q?QP{jC>rvn+nyX{xVr78CiiE2;K?U0D=)=ZWTQno+rfAJ24I_ zSC>{SX$WlxrJ~gY47yP1Q?_d$Ekr4)b>n9$0B6F9khNT~P;*lp=4NGrBU92OB zV6+9XSa&L7>mtw|vP}<@b54wp4ktPr>&ZEp!R}#aCwQFRj6k`uwn$M725Xk7r=3<6 z_k+O+8(p!7%rbKjAEKBsiHe2%RCxcSoJZiTCF8l6{geDD#Ct^si-{0@!3!De0X#t@ zXNoLtyNYQXw>8=m%3H6lw1fULqg;J)wIv>m_LW&&eW8blsi7cxTBG_L3I*V(*40*_ zq8Msk({meKQIVE?4GlhjQ>_&-Jk z;r|c#fe0Rk9|+x-f}fabQ9n(#lpq@g9iXc#%Hu61Eu;x78sgbff?_e27&Hl zeR$;=-$rhfBSZrX#f1SSfxh9OE_2OF(OwNuZ%$j$Ew>@!IO4%umxy`-K=#K^?` z>fe%0tt}gR)+N?-wDh<3JE_>iWp}nz%tFyXVhl>Va!zC8yUq21e&=s@3c5eR^-fcUR0D1N=8UpwQyu22uAZxI|2dx((t}06##Ht 
z#GY}yylyt9`de5mlh!ZGrJ@(V3mjZ+&u_c>K(Y?DzxEb{*1NOo;Xrcs@wnFF2TsSA zm#PYUC!SfbKYYV%%G{&s6`(OE6mcIsq2UG1#d?)>B$pkII93MG6-u7x+BfGN!W&LS z@($ng3{D~!QN%a$Dag_hRj0nlnvuB)2KXi7b)RLRAo8J0P*Ivk2N5O@h3c zAP0L8T`34OS&%9RF$l(zF`5k@e)_Nz4p=yssN%sRbb_)tWFaIefd(M$2%iu=&d^`` z3^r~8vK?J9<8dQnk0U}`i-4_f-=^mwSig1gg?H#f`u-;#(IZcPIx>QSwNo#b8e?13 zi=?*F7PVy%z2iFC8tXkeG}?@Z>}!G0+K=vW@IQe0nLZZWJ|GYhA!KA$TQYT`00?#` zFv`6`jF6g3C z1HC`6!y8c`+gWQ%zz9m9A~RGGTqa0MBvqZ80u{ksQ@|~G6M!g`fT&BC)w-QV z`O};=BY$@5s$^65I#Z2?*9?W# zu&xHwZyV9*1pWkG7&VUQBg8L8>-Hfj?I%%I#|{B4gHkUtM3y3LA)TBWc>j7&)oI-A z-uR_Eccp5ZJV{__aqE8D)c_>e)JAzh(LZ39#UUZq>21kGc{!!;^15;>5|ot&JJMIU zx>Q@BvA{kcL4y_h=9PEBxc&x9;nFJQH7>I%?ZFC7!+0$K*QkXsXFB*l%XiRq_z5g@1 zF7pFj_o9;@$SmrzL%M7s`NW>+eK+3t^vbj?qB#fZw}CvOt+tvVTyQlrjaD;RHDO@RUSbr+=<>3`LeE)V4syg6qIlSLRnALGS$lHI%&CgwXw8LnL(VUFIb#%9iW*S|_F1bn zQjE(h%KDM2yYW))QE4P@&9J{Q(@SzGoWXnZpFi%e&5zaWm|Vq4^u5`WsMo0E0uJJK0NzA$qA%UJ_dG#Ylzn;_e3uZF|D zhN>{K1d632uM}Ls@*6u3c&*6rBw^Axk1Rn7A*}!o3Qht}TMz9;S@q|(gu;>wlk&Lg zb#?*|Ho3nb81*hxiV)S`gAWs#onhA;#n7&K-EsYNZw<10=5-F?m`08|d}lYzYu}sr zn}@)-=#SeS{SN46zj`GjA2zI~zXy4C*-pdX0@w_V7e^1Obt_M0@W>#Lcq;@wDaJ? zbq7-2Yf+e~b^Op!L&MOa@m3URTHBpEaNWYe&ggc;IqbgbWw-5Ws=mFlwybFIXnNPK z^wGhhvf9eqtDAP+_A>e(96n;GF8n?EANqR}olOlPo_mo65djrlUxToz9ylV{>D*cc zX(4c?el*^s%)mVvi&Yshh4dszj=9_fCbpO9yJ!gvKY@rIG7V=F?*T=+yeR^{da+bs zlQMRsz%*9FGzN&3aQ!VK=VdmojBXMCRzUa)_|Ee8YzGE%-_>)rtP0;NO5L=;@dcDR={_p!?UJiIT3hj*~2yS=rp zCQAw&V{To4BhLCpd)7V8KKH0ozp%lklZBzePdZ_7(ye7c2jsu#3MYQO6AHh&B3fSK z>MLGc9y3-yiKIabW5}vXs7$h@0q4IV(SXRE`r1<1evAjwvj+@uLrD-0k3CCH^3dZ; zB1KG&=*IwCxm4tGrhtthKxlx5`oMHGc;JZkcjl)pArM}Ivx>%03KFD&5kqn_Ud8JG zFjs0Bt}(Gg8fPNwfROBlgF2>Ty@@U(skX-eZ>T_)(;d7z=%dSQJV)ES&Iq6VOE zqlT<`_?6}`91!xMlgJs7(A^8OAO0uVwp|}GWLVE<9?~~tb|K~XZF+n3J7O-r?XlC5 z$4-CF!}Axw$*a}JRy~HOlI;xr!y2|+1`9$L!@xX)mIq(TnsiCn zwfy1M5{8ZF-!BkvAomixmQ9j}Qo>f04hQenbrV^y4~1mu5&QC4`>auQ=|r+kE8p7C z+~jK>UcH`iXdB{ADTBS&=oh~Ez%x1k6V3v_N_*~Hv!qf%U}t;qO&6sl-ud2{4{7Ze zRBN0KM0gTa2zPTaKft?~q?^Na5%8aU)l<0ho}&>2QLQIJh5z5lLVup@pXu-x!<~8Z zoo0G2ka%bctD!zRd5;&>F^rX??oImSZwqf571)N9#PgvF?ZWZE=u6#%0WK9hkpJm8 zl8$8|#bZBT@^FEkO3-S5sa}tGQr;8qc0eSXQdQi6f1OriZcv zqZa}RX_N?}!&N9K%F%q-BKFMV{4ivhW=6VxX%8A1Z$~oOwpL$*zqNgT3k-<&;S9c5p`*M5lS_vQRCmJ05s3i!;g}3F-}G65m+vX zCzJu<*zK(0g&uUY3{x98x{B$*u1csI(eB|Uj!CF6J?9VuI4Q5R3tdBc6!v_eQqKG< zQ3$J^6#R%PnF~iSS@Ke;E~uHAUU;=g4Ec`XQtzhvMM3SzO!vBNTQ;|EHR{>cuEVXb zdTQW8bUXRhQy+i^b`^!Rj@sL*>MLSSKZ}?A5n`LN4Nsip;Ij!V2MOE%hBdF5Ii&z~`es@tbr{j7Gea;UnWSB@%L z;j1#$=iEdp>4OJ zZ0{g!gPT}21;UkQT!wsJZ0faTE8d`~kilIz=@rY)0gi!+w}$t)+2HWUejwkqGx2Jz z69H~e4@`R=&2Y^2IVN#81=8j6L{=uT(z7F1wOy#5HHLo? 
zVexrGm>M&mN~iT?+7h6L^fj4xM;^;OthX(}5N2eSDeD`4EZHWpJk>=32eGk{c^<3v zz$3*F(0fR^kfEVXU=2}8*H6F zFSxq+6+G5-^s89bPW91b9qNpCw!@C=qWZnal%cGMe3D))GoA-ejVI?PV#``l>2BiQ z8(9g6M`2)&1cn8E=TBHr zNnD89z_0+Y{3wAK7~=xN(3lH+oQ@s|xY2bVPScz7)C=T6BNv~*b6x>@(um4(pH4P4 zfyXwrB3KZ0CM(Ji>ClJ>8dtN^YjOrw$7WV`0Z9ko_GYjVpE%+8tv8AT{1tP7R;Zz8 z__8-HXPp4X_2LE}3UbJm#@lU8BAw;-z>>Zxl;1rg9nI%xe`{8fwBZJv@<*fOTh;2` zWK~5)MRf%z1NoKL9gcOaaoS{ma~a{+Veu5Vj40dZe&#P(tGLep4RjGO+5~J)~??1{`R&S6i}C5u2eSGww7aCMk+|QSRxR5 z79Nh4e61@?^d=>#j;vuA!7=Fg8i|{3Yh3GsW6)d&fs1`vQy8-p?n(~p?#M~$73MC5 zugdgnaOQrg%v3w}xym&y)o5SdRa()XXx`Y>P*mC0lrC-VNYphq$D#*{%PQJ>+bh?u z?W&3V2iA){fU^L@JPksegM4w42P6mxX&weYvF?d9Ul7>o z`PR^%)y)_cm43ro_Gde~n>UQ4Z#Z2u)M^;xQa@W8M{Znt`_XIBHNf!27;qVd(EMUX zmPxB8*5`u)>%|gCi$;hXW$dfLu0e>2({=dGRMH$5qLiUgP7yzG2!Q>>U{;Dn^9PB{kOwGCm}t- zr*`zL6Q}TDUR<5TH&RN>zL)e<-Og$!;5m zl2Q|v9~c0%1>>RK2cJP)n<8F-W3ej$MI1}w1& z77GP)f-apRG;&fTG=xLo9UU|N2_JcpTH}-0<>SvdB3iOBXmU`0P2@1<`(Icl-@k`F zRa_{Lm+TL_fMjz~+(fcJWD%Ib{c(Xzf6+uu_8f&%s+M4ER`^;e3-n96uCJo!vdPKI zdMey?rf%mQCr{q7v(8;9xGg<3nLai=d@Mcbu7_W8Zr{FhFFAbZrM$irXNLBzU&EPd zgr9$x`fGNXncOxy%xGX=s`K^?<8H*5jmTtc3CCukRU{tI@n$YCxPF+M#k}xs}kVr$Yt3mpRnBYd(te1i{Q+hiG@rML5o>$w4R1Det$d5yTgZ z4mr4SU81Fl-JXk3r`gamPlxQo0WF1;$dqxj^~2Hb&L2DQ1;?*|bL`bgk`Pz`K_fso z`xT7zKoKU6 z?P030z*eyA1kTifRYBoxTcsAQEv&;fhSH_Y-O+Bn7Ax|G*Y%??ZAts){@0~{bj7L3 z)~%N{4h;=9Jns#^Cm}M^^@+jOmVrb)-L{?Euikw4Y-{heC$8O~w>cj<@lPK)s^HOk z%Vs%J+Bo1N_x7_muh(jH&!h*}FdqgTM-{5_NQACrelau#JlklEZW#)|VpIwGc81IV z0o$zwS&YuVuZ4|)79*Ar3lc5CS;K;W335S7c;SLvP8Mb0A?ElNb3S{#1l`PR^Uf7! zVjEA=syt_U{^I<=u&?=l+pjVZMe2tR4I53izuWAbB2BK0T zHUT(#SUPlxpbDEfj+}C-%iC!Sp=Z}D>>YEJdBupW*I`>XkL>KKXzZKZu>JboiPk-H zC)#%Gm}qDl+_bZQ^Zve?n(pm=`oK*)+8VZ}u0C)EVPcy`x!vE~&^6f7+0$5F9B6P9&I7q#6w~og5K&t724Kx94TW?Vb^vgIcu*f%!759TQqe>;I zgWCQsXL9Qs5ns@ytJN2hu%!4M z0z4l!?L`S#DoFI9IT=GZZLQdUK#;UOa_VLRGwQ(tgMk@mAKWP8Sm;7}87a69Fg5iu zeBe!(@W`vNfZLbn++d*=gaR#2D@Z<$F?BJ>J@^~X#jokvF(jM(lij$84ivP|kKCWW z|9)WOQ}^rRnRn^qr#^rB^ydL!a`?WeRfKA0Me?yT5?NQs z?!M4CIkrCCJ~X*=vbh=m47I1%k4+Y`#s7Lq*S_ugM;SeFY+E<7h#R`MUA0wT%v5dP z*9FdRa^z>DkHfCptj1Bh3uf=pk-iOUJ6pzE$6bz+x0=!$nb|GaL|94K= zm$=rs+g;z>RQSTYysf^zt);%c#dgqr>Q&1xiFdx}H|YYkks2Z$?G1J1Wkp@ZAZ<9r zepQ%+XyRNEh2A@22!iC|vrtEIUD^sWNRIVR|g3ElZ$&~*=lW0EeLuPl!|N1LekkD-1>8{;L0 zhY;i6(KOaP=4<|*oU$RH`J0#nvz^hwE_q=2LUL#+3K$4-3C@b}j?Uy53c~TMZw@sNtPjV1o=IS7-SR8p(-vb7=uwX+J5dD( z!enA?OH*}KNl$4{&?4Z)8Fgc?iH2`7(#&#{Wi5x>)QRk&7?iyhZeNdL5}4(|0rCLM zLip3mLSSp}C8G;i4ZNxxMTo}$^=DWoCS#vI)P%)(K#xMVV`L7u7|wbSy4m#`tj%C| zINHTdu(WnrRBO~ZX^YH4HW@~aX$=~4^|5y-EXTuoSiBQ-c=fGog4Eqivo2EJsp6?f zKOCn_;-x`Z{1g?&&Phq8Cu}@W0xtlZI|69jWR&+-9*neJcQA$nGh-f@XDQ1;9iZ31 z*b2hX{sL5h8@Uu`QJo{;mRugJ}U|IW?36B{Rk zUbfZ^>rY&LJbKi9x%R;q6eY_xHJWaV_dX&&;Xg06k(h=4x2qNJYeAN zDKwn(qC^xQhP8fCBm(VJ@zAljHEKK6Npf(c?o=FJ6G`$jMSqIdQTP_6*Er?kWA*i8 z(e5bzf2=-|G^)_IQ?E?l@wTbgz5bTB-C;vju3Y+5WD*6ZuU>i(nU@doX!hZscME>2 zjenEgfRO4a;;O7Hi$niLw_KnV8whohXr&jS9D+iQhfdI&4}o=IH!t!`>#Z|JFc!n)dED_kMrA`KldjXy`nZRE;2aZDC9FD-) z@I*-On&{NuHfX&6*35t2|H)6@Z+)6iW&Tc&>%YvrS)UXcelzfQmDo%w)hAZ)IgOU# z0a&FQ3yt@|77#18IEaWmAkwIUq}7OmaQ3XBa5xy&C8&XZbc=pCqkxi$@bS=F;NW5C zQ+5oCoyL}dkj%MpQ`H~(TmpnO^CkVf%q#R#=Gpoc#{3wY4GEANAhRDBw>cmu)-j!<+fZi=~r)UDCS#-in z7P;~KF{lj(L9|Mg+=j|qMpv5`Q8x&XVGBa7fytmCaUdxY4;KIpLXp~`VY1xFL7_5@ z=b&;EI2h+7n8=3V&#dth1P$}xLgm|)0}105EvLx)XVLD!t;67XB1zpO(rB!=-gb(A zjjnt6*}Gpn@R5x(|8&_uMV@=(jhO>)&Aj&fc{a7AoJ3mI0RG*mT2w;cf{2n@vPC$c)l#jrsWW5N47)mx4_aw2o*@D6I0WB75P!K!_ zNj9=&6_5ghT_EL{5Do8qWxKOsDH%h>rdy!E?OAYwlWCTmz_J|7o&aYs3?8xKq!LjC zNXCq<9VT6FNi&aIo?J&-)L$yy87dS{2bOPVUWWGr{$iT^?k);fs#CWot4u5C+Uhb0 
z@188S9lsajjiLxYDnhn^W_&g?9_}L-_*++=08c=k(&5cy z^6R(<3PKkvB<1_h;7wsqaq?iH+X^7~9sIp4@8(_t`XUZ?PH|exwkCk|FpgqJ;Q%KDnjaH$~0LioCdJTDVkG^+P_ck+hq?WXvBSedBW z_{Pg`u{iuHa2Qj4rXDT3(6Ty#lUltSX58fsD;E1=N0_zo_^ZX`2Wo}A1 ziHgKaGru@|4x6CN^T1*@rd|P52}kFXA&(&PGoND>so*C<{l4n8(kkyQfg)7iAgU*ecfHH%{e*f(%HZvgvg(d=O-!I>%sehAmQwaV7ojyG9lwx5~oXF55?l6P6~a~vUqg0 zoHEZUj7n^?6L`*_34KD&x~@oeG&Il`X|mSqYU1@JUgd1h#BaFvTg5=f6-fCd5wQ*Ch)2tM9z?t;*zztpg`Xm5DTv}N- z4F`7g$GWs=^kA$Cnx_Xc*gt3-T+d;Ja@Hz8dg?(}qTJNEUOJEZF&-kUW*kgUqBhdoE0U&%bu&R|{y6z7 zB4=gJV9huKn-zKy3Lj9Wcz;et*YMuJBDgf1)zpmh=E!z02J7j}cV4OrH(w}bQeJBC0l>D9p2G#tR zPGk>Mb$dY9)dg@sedsE$ZkyX{%wg|bO%6M&hdB+%OCE=Y!JX#iu-n3USf#Am6r(}l z&;)X+p%;-FdI7oP7sz1etr6TQ&g-OQco8dYok73i1C*#_?aTXVLv=3~zGe^@IlvP=KlgdCo=E;5fymE(pS9;Oq2V{BE~XN{Gj6nwV=1;>qm}ce*325|gv+jM#$3 zaoBMw#@X-zOoc!lU-ou7{ju=J6Y(457o-Wc-_Yp{6Z0RDlauxyrzoEPcp{BET6~GK zI%;ffq?=8yHddJWl52L}+~~_K+LoAqXX`-2nTI!=8HU!>*0%kIWaHSTja6%In&=%% zR8}U&OsVQjeYEK0$xK5Hbf>KpTZ-ZpB_}Iu*NpC5yLRU&Y=k1CbKRN0eLmUQ0)OQA z7$aiWbfAOXu9ndJ?NMD?vPs8Ek-J;m34asven}@vAfQ3Qj)7gV{;sq0l$OG{fKa}Y znkFW0+fb^ixVP^6m!A*C0b{5@7@QgNz&wPR9OZMXepUWd&L!$UVEBZR1m5O9A*biE zUz0G-+#A#j#u#}~RWZ=dNNI-lR*Y3; z=YsFwz-|Dr`BlRfjVcEb>hi4I6K$HcP_Aw{F24bxj!w>rf$pE#5-62y! zMk51YwJz+;dh?39;$_VNmcp z!8R>P_c^^%eudKi$GaO54&OM&Tz1RT_B=X0{phr7vcR(WpZp~IVo>M6_Wgdx zS7hhz*Xz0uoT3M9={e>(Fa8A2c|hc=R`s_lrxG0t|Ydj9)cTe5TDo3nG^p;!ST zKLR0UE^0v(1>8L3022Ly2NHuf-Z(71U3%1p!r7P( zFUMW>g!4vtIqv3mMAU$nN?$Nd9X<#z$K3(K{Q%neOXj#EheSWeIveh+A86_sn!a|b zq^YGjZk3m{Omo+%%wpw*1xh$0$X8&4wWRzuu){C*Czad zxI+=!iZg_aIqwiw6$|J0a!a|4_|@mU-wtP2qi)Ll1~?jW>DIzz=|BJV;AN#MUGVuk z`B-3jcnC-vWu&cxhvU5x3^;1Mf9S^t2~MJd0! z%1D)74S@|Lrm&snJd%?c@zj(V$!I7uu6LuQw>n;#h;=fd9Sg1sQN~P~GmPJY2NM0H zxBN@$;oyDPI5Z&hNgn(6J*n;vdA%&z?Rt4lCrjXLGMD`uE5sG8w|VFEzltkLHWM?> zL6i%mvwLGP%;u!~dug@l>}aoO(%B`xa5d@dK&!APaD56WUeW}dvVKOkW)sB4%+K}o zK}ediFPA>1cVRjcSLE6pyK8>C zW17qFS-H2d!4d6F%zyvUO8M^$Xtq{#PcpKGq#V~haYY4bLjVNwy_ZOB9radT^7a}2 zc7gLY!9D;J=I<4!gK-Rto!ooA;LXAST|P&S?PtaG?U7LNYRavIyXLP*p7@pd@Jj1r za^;7FPMTc#$5)Xnzfr>CWj~X33Ey3Qk$Qs2vMZB431nKgM*o*n>m?4@MaQpjH1ugu zR_4|}?3Te@^OpnC0OWzcT4@PDN9b$1C~Huo61th1ZvwT?S*W)Xf!Y`WzowV_v>Exr zD7-?g7nUwKHciX}B?wuI2-OB!+&zuL3mD3UgdZqs@`AlNsDwB|RqO@M?Z|~R<@Q)h zFmDDxV+K|vp`lPht8oGNa z(R+Es^yp`K`oh5DFaw!HVQ(Tr;bgVTgnqg#0G67=`F(YFTxg3Wu zNyNT_5JOZ)fS&~RsFK(qje4+ii>)Lu8$%k^8rlz$tp^)FFRo6yw*LNtYoo2tzqY{o z2!G9-4PPS2a(fX^_gOg~<0hVN%jThhHSKjZ=4ANsbR$@o(PqSmH^CTc;^_j)fyA`Y z11*fynogwIz2PLA3xZ=^qNe^YWm`Z7;^}gLu&pI9-K2+jU=Zsm-Cj%=>J~U^grl(B z9Ejgy7kG+oI3K{ChjSxcCquvm`FsxBzR^t(R`dV|;Q(T}bKioxn-|BmifMe})ZE=0 zZ(i*}9y3B_`=zX9I(-SN$@ka`UGKBlX_ab^`ZA@{-8(17St-G2d)~R3+>UcI*@bhH zz`3EM3a3u^X>l4LsANUfg$vSZE7el^R({4KA52U zYmoR_jn`ulM5s(OId^!3gR#I$RVjwfp|VEyaA#Elvbl~xH&zmuV&pSsVF@u_^E<-! 
zj4OIA`H=4<6&88ZN*BtiawmEF1GG5=;v$nLNcX+I2|M<|3i7Q7Mnm1py zz30Z>4cCnB*<`RN0s`S)msbKi?B^!00W-qMchf624xg%axuLH zN=As{>c{Aj^q_`;WNxtL!y3_g10guS3N<6%ZHUR`XWyuQ*Tm3e{wQ;se(g!7SmEdm zfe!qw*nAq*PvGB$PqC`8$~J;9w7`p|?lVRj*d@GxEwM@=1Ax@kJG;*I({ILuE#ibj z$15VF^#*4FIP2u*iihDl7VDM2Lw*3mPvI4papis*D41YGF| znig^l^dXD}Bwhi67d{B0F%PfwM{O&ClNaSF@Ztgm!P@wrn59Qq4J;B$fsvf?4XVcq zsEycf977=uIHYV*4Ly=dolT|i%KEi4#~*p*_>626ld~QZf7GBJVLc7@VMwfPAM6;+ ztEVxM>s&HbH1H0f^TsDdRRViQvXUWxO3gkp4Emu-*l$3BYrKC*sOr=Z6CY%U$H* z@69N6;p2;5&G~#3c{rjiuNS;mYFe!CPIMxEthc#0tfqyS=1i>#ij^+LLCzwB+-(_8 zYsd?nV;I>C80+Qp#CoBx4r8|LWjIXWpvrAH0dt*q1_HWr4}9R3q2TVE=a}=NdDzt9 zkz4D14Cfi@mfjzNZ*#W$t!=Iqky*BGMN?xP`bd|wmbQl0t?0=%tYsmcjcT%x-U9@} zGQiUvV+9FP+-&%uVa^+jeJG=Bv*v6fVCUgRO^Q0@*Sz$qSHu`a7N}m4LkZjz4(ZN3 zmyhgQtU+$Sk6tmnExk7cr8pt1Q4s?#-=MyetTL3cwS~EPURLDB3ed<(a1j=$@QyYd zia6OK1d(cb{x6>LULOKfRrt76w6Rpdfcj;QH~GVH11V5|!-ca=pP4XyafZol_&0a% zxDSI8<%ritGyv;QEDL9)pPmMEw+^W~0oNK?QWGWr+ObG%`|j+B;(#SvUSyE5a}X~ zEHz=Ibi6bBb|AK5YHV!l?E#LAQ)8)IXwVaEFg3c42IuDZXjh1cncIbV_1a~ zQa1!)xS~=C11U5Hs#t1Wv-$*l?Gbr}qTK2el5c}f+DNyJp?(XZSg~H>eUIjrpJ+zi z3H(2+Ce+vKpBi8Jn_qeK!iA%+oIRR8nLcVf|98KOtqVM&4uxh8BmKn>ui!5p^Xo&<5aGLw#d|L@l^lJln0Vk|DT?7RP?$%s zOmvxhVLo^!$yxm6YEiH zM)i?p>QHP7REXe~!W&D(uZxB=;t=27R-&NP&xqe(B?|k}lvun<6#irqkM`eRiQ?lf zX7}X*Jb2-`&-0L>)uR{p>G_MF0WQ|Rk3~2l@qr}!$nbS@$a9o|3W0tgUZUaMw!l7d zd*Wg&avSznrE1mdXp3usgNLCPM*sF~aeb{#^;K-ojXSXcA*-4|)Q;rp-=tJLq8qcu zb0pM2Q&>MoV>!sp{IdrI<6b_qinteear{smhe;dD;#|)z?gg(zi-bfNAWN1afCRD-iw^5< ztO+dg0CL7&a*Rv}u1vEBMoY}b` z8$In8oF;1n(w)l9Yjn@-?bqui=@e|Zqk@ ze;OVd99YY=9fVOD>fza+#*q;L0|-e6))$qsAc8Ss2-Z&;J6Xa@ig{$A&^ctb`M|87 z4sci$LT2%{xLpFz2@nQvE2!fs{F$=u?(7VERt+7#ZYa*;&)^qv=UbqD6^IT52lEs# zM+mVCorfAn$e~)Uy8C)>?cY+khInw*bW?7!ykZ+bd`6c;^6Mj>dE#A7k)oV=>T!tsJ`;$aazdaV25PEo67bjat5BKlgHZeTd-BoqC`f$!! zyX+vU2VI88?PG~FC~}q>;`x^=-0t`T>r{b1xV)Qv%g>fo!yVj%4pGOi?M$GASN=ki zWGxIP3(&zF!}%qZhyPCx{f(DiR6Mv&(O3LJ#o|$09P7zi*_G&=*LUfD4-o-!U_TN=W!}mfZSEp7a)T9V?S(Ezk4)`s5;0p!N}c z&RzO=5~ZZN5^LHSPJ?I-$y^i4RS$HGw!)yvf+55toSmkquL&oOQR~k+DMTZ!En9Nn zY>cvHaW!CZ`LK>fZO(y3`WkExJW5bPc&dCU*q$n95!kUR$k3yNgSpQ@Bi72iE5cxr zVZ_X7_F|Iz%c0KaOM&w!d?{yn#6}*cc-*{oD_TM~)^>l(TdJ#vyEjZWoIO~!_R#v9 zZaf=Yw8%5N*Ho2d?$kf&XlOifB=aW9$dm^oa$Z{X0efCrt6CWgmB)96TI%bnLFo8G z5H?uFlB#Z56I{=E%8ENQo3nsO>f!xTZ3Llz8nYBSJEK{Ui1H)=DnYZm3r)oyF|=pS zE!*~0z-29(V@z1INr8pzWM2_{Pc{hd3S`-vh67t6B4@GU^#~DRI$Z689=gQL{ zd-=SeOtu~-%!*LM144L?64PUc;{Z5gB5HBFvu6i8EyfKtR!biuSB*igqj?6gZU)9> zp$n4>ud&XFY-~+?n>1*1u^x5t@|U-Te`2R6c!BPX$ySaw+sF0)J@d0 zJ0zITQ6Iq)sgI!|RgC7sQ$!mPqFA3rf`C7d0@+PPDnbgh4TFp2L_2sUL7VIij|>%n zmuQ(V_y8&LNkLG=g0+!>lzBG}1u8n6)d^4Mb(HJfdC3@bVGjI~}~lIkFbbW1H>CBdv6ihP)FTDKRASO8hRuJIO-= zj}$tc^+#o&pb1;X13AAF;}ZewgIkgb87;WEsN0zhDy58{ceIUtaaYy<$FkKSlXPP_+F z)wbk}MXVVcw}Gp`U>X1<6dY*0xUT2TyHNl=UzBO6io};1``0yQ$uWIj3XPOX8p!iZi!t3WdepV7y?3CSVrE3&6SBCr+l> z!bJJW_=fu9p=qL#q)U8fjXD?3Lvfcx*+mg6li#gJ!LUUDWKS z2>2c+eK?#E8D`z=E3GYL)*}EeF|Wx}Fo5bt5CGY3GO)goyUocpdu7G-wcht=*t#CH z{`a1ol@up@=6K%f@u{u@?*BFN4Xj_ax-ZFS#EKHzU)by6-U|5%PiZ2_FrJ=Z&?l26 zbQ}w6()lSM1)^i6hfH!E?m>&Zm`G%$U3Mwv5Q)=d^N(SRMgxwU+fUu){6oxJk2uE= zpP6>pa>NakBZK{C$)<`5r7Bu0(BA^zNPpy@pS2A3EC@xWR68n3kkaxwgc3NfV1;o} zi)A1W0HY-nn&dSA;6=YI2jJK46~-)e0~m}kDL~p)kSB+ceL&4uR9_PFP+%*dYfph` zoQP&+=IWpQ^Yv>T24WTWh8TM|s(l3-GVqI^2b+7mHq@fuwPr6zC@DgpXE^F8+QC59$O4P5m zNj1;;7O;AlhNkzn(Ke?fuW+KH%l@67vTwNIE&61<;2ZBtA-XD;wXyeH-i7cp8+(! 
z7?IFY8m}1shpeY?Ic}lzgi4r_Gm2h#`Dt&yk@aABa#e77BF74(Sh@2En#?%T4<_qq z^D&hH5J>{}x2GYD!rNzSeHv%D_5DkINH3t?stUH$C&@J;mW&jE_4H?;$qi!}~g&m8wkP68LL zAW)6ivo)#<{Wwr5uPd>34V^C%_FbxKbd9%Tdl6cQ5*@Sy;|b9cg0De47~%<;mdD`l zl9(4*Gy*h;+{tkZoam(P$lswlRd~juIV&ZzZE#X`79~x$wHt zK};x`VjE}1yx1?i5KE&n7%Yu9CR>Br!OU0&g^|%oxKRMaah(nzE)Blu!p|Qr+GgP&1$||lN~JoQEW^#j#vbK*P_SquqL|G> z6re|^xSUWdr_+7UpG`cA`JLlhit-(x+OMS+q$rpe5Z{b!&{v8)exp1CT_W)P**#Gm zxO-eRN3aPPxDxytp-z$ZWNV@Ijmlm|n(2hTa&%iwXsXyapj@p?$P1?Uc@g{crL4>kEf9d}71&aB9nQ0kCVMKdQ}1aUJq zxyqOzwub-$0I(*%iUv%saa#bM^KeIA0oZuVzbTkmBltiZeX2;+AfdV(EunKLt;;YO zJb+(CQKquN@Ln30u`?CS*D7A^z{q=7oAkEy*lI`WX_vOuwQH!osHC)bXScIY{g-wY&t7uTcpRM1W$1P#+q=k*+Oq`J_O! z;I6mhpzoV;bUTT;m!lzYAT8Lb+e7|Xh{+@-a9DuO!~7xA37$@1c;Uit!iXrIfwlxu zuPqSFO@J{HO%{x$MLo0~+AI-SYB2H& z`#Fy9KV(0bsLp7;{amU>qnqsKa;(kN{<-t_AR7GE3yHqz{=~Jnrso$f+;cuPm^d^$ zJ)GD)IyyFleN-iMcbmuQiiU2w#Tjbc3v&u+j}r7 zr3UfsAvKHNhw*8%8pZ#{kQ`?|O~|)<)eQcZ2+X)3f2A-_3g6y|e@|oNUYYT3{B~ST zVNQ;En*5eli|U-Z2A|L3(+qx{S6czWyN#+H-raWK39rT9ZYDR+P}V%|*FyJ67<|*AoF~W~ zn~?h?FjE36F^}tM%y>@lay!18Q)lq+@~btx!tc{qYp&1*{5y}?HsHT&aNlXgJ6wl* z&B$LV;N${6UC7%T!x%ejrQ`^-fw#iP=!QIG%Phv6lVdEcA2i0Zja&-l^^kH?SI)ZI;6bpHk zx*D&390)ss7k|3CUOhwIpl(#pR5z)U>SlEdknsuiHuXvM$Le|Nd_tJKTX z>(q-Nv3^zko_eQxvwD~M8}*QSw>kx6z8uT^cUX>xL3jVR`g`?h^%*SmKdFCEpH&ay zXuLvwL48hr9t-h+x(!QmJCHwv<#`j9;SOv^9^6GN)Sc=sb+@_~8}mN(Z1pVl7wQkx zbJYFn*VJ>>2h=0#8|usIU({FB*VWh58`THZ>(y_oH>kI&SE}DpZ&$yiUaelE{!)EZ zeN24_l>YajlzmG*s=lrMO?^jwSN(_jp8A3MclCYs7_x}sx_YKqFZ&FZr2@pjqcQIbwYRPb?RT$H+46p=^nj7_v${~uQ%!e zG?4$fdP)!JVLhTp^(MU;QNUZ!L2|2}P(Re$^maX|cj%pZm)@=SKoZ}p_v!umfIg@% z)0gWj^dWs%U#XAir|F~mn7&G1t*_C?^|kthzD_?~U$38`Z_qdDXX=~uNqw`vMNjEd zdRm{>DSbwt)wk-jzD?h*XY@HetLOB2eTSac3;Kdy)OYH;^xgU%eXqVxKTAJb->;ve ze@#DEKTrR%T$% z_uuKi*Pn*X?SJck(4W=+sQ*cSPJdp1K|iAZS$|P~30)TcMSn$qRew!?U4KLWtNy0` zmj1SWRR5d)j{dIxcl|y6efeJ{WJZ9{xAKc{<(fiFX;>_ z8Br2~v5s+Y$Q+F$@3|C3I?E#!kxB?w)sdP=ZKN(zk6wd~k)}v9YN)kF+9K_dj>wuw zXJl>c+|=~^+-%kP)O>pGG}%^aK6SeIKx*c~RMEl3`MLO%84;U01%8@}PtBd3n@!zb zK0TeDpI$t7W+rua+4SkT3schsj-8&K!gR3%m~@JN@e&6kc>bZlm7dhtRkHY4{hn!!C|v*xQ=`6_yF_H2C4PCX}6SI*tKID2+# ze(~JQ)Z&HMycs@kUVK6RDqryL7rS8QzQAW)G|#$dCM;f@Phm-kHLmr{&?xPtPr$ znn|6VpSm+ucIIYGIg?sgNXMpz7tT$ekDW4?X>&PkE-7<4V=iaSU0|TSV)^2l~2hs$WLWcTmv)3%)$(CXg))J!>*FgCBU#5d*Rl(dGk6obMvRq zq|T*h)3at!ExtH6wJ^On%P%TPY&FHd~VuKGgY_KSggEC%ken0t@K?0cjGaOnVCIu|aT z1s9((&rk`{K-2nNTFny-$U=R0PnJ0{ar9?PPo*O;N#m^V5uf0Z8(OnFji0a&`gDJ$@@M#;V>if+%5Lypt->-rv!aIc$>$2o{>%^4 z0%w*tzz^+@@~HB;+GmjFH^U{(XT$pBbA@G;_6KW;dZOU)8 zLEdS=0}NEYC?yv)6B$-2pDQdswLkcGsIZ(>ey9mBRx{EtRQuUSLG4U{hnrswpS7R; zkrkHPnjZ|ymCtoqeAoPJn6Lfp&rxpJuzX(=;KlMq?LzLOR4!zlV8gJ@*bBL1OUcL0 z4GlAw&t<~Nl)zy@@q{%2r#o5NkBeV`{IE4E%2ZfR|fw$ z68z)x;2+ln|2P`_<9P6oYlDBx2LHG{_(yPsE9Qg01QA{l#Aq}%dp5QxD5}iDPPI=B zPDXW>^f{$dr3Eo}4srF?~`Mcamrt-N24;G8^R_Xs$Ze%k7tU13pJAGjh(#D*bu@s`3{dDUTOl7=F6*>G} z>C+c(l|M_T&ZO}an@6{m-~~^mX6EiHmyLaVX70|Z+pEu}=FdTNJvC!QnRwcWUeNP3 z{wsst4lm5i)P-4S`!JcpHgHjX(bMM_&f_~X4iOLLTYQ7K7uW~lun5o_w+{Ndf{vSp zC14T%Yln9IHdq9{2Ca1h`slZy<39?GbW;5o|Jw=Ovm9Nnnsu|<4L$5dY7ca<7pwiy z!hTa7pbn-khYt2CbqM;`>(ycXM(AHxQu|Uzpnbhr9fjugc6Bwhu6L*t)VkDl)VkF5 z)Vb6R(1#v_J;1!%DD-sqZe>bed(D*z*e&>ma0p@y{*|{yZ1ovX2Gur`J_}Qh{M!fl zY!+4v^ZvA)z<(rvnSUAIR0GSz8CVaVf>wMDEG8UP1}g|>FM_>wBeZB%M=3Kag0 z>X>T4f95+l7XVQM`vEbJ-d*?)t0C7S*eN2ge)zxCdB1!KztCE&VI$#OMOX`(z>eS^ z8debv%Zj-JBX(^b literal 0 HcmV?d00001 diff --git a/docs/_themes/ceph/static/font/ApexSans-Medium.svg b/docs/_themes/ceph/static/font/ApexSans-Medium.svg new file mode 100644 index 0000000000..6c624ec970 --- /dev/null +++ b/docs/_themes/ceph/static/font/ApexSans-Medium.svg @@ -0,0 
+1 @@ + \ No newline at end of file diff --git a/docs/_themes/ceph/static/font/ApexSans-Medium.ttf b/docs/_themes/ceph/static/font/ApexSans-Medium.ttf new file mode 100644 index 0000000000000000000000000000000000000000..44c281e3391a61360b46b3a144eaf81dc778d0f3 GIT binary patch literal 169168 zcmeFad0<^-nLqx#=iGfu?!F~AcS-Keo-{Xm()Oll(}k8Uv?YZuN!zrPvXmWU6+yA= z$P6fmxQwEM5TpnwGRzDDgQ(!3gMf(3sG|%bRINa7zMtnk=iJ<+Da-de^ZV;pcyrD@ z=iQ&>eYW>KB#5YjE+dupEnTzvxVL(oU8LMpi>EIyJ#NFI-Mh|NO3J73Q#`x+xVDb| znwS;$=i|ZHj#J0?J#p!UtBD*DBGV%~&OJM+R=9pl6y^8XUHf*Q>OS!1ZX(a~$g_6$ zDHrUz;@U|&QR>&EoOsts6XQGIbZzcN-gl9@Pr?KHpDg$-I`O;qq*Kp6|J0A4`xE?r zfyh=k<@6on%BIT)QU9Izz4+Ae^Yy;&mF>gBj?$*KMhPv~DxzT3pt z@13!C-_#SuznMo&zr;^FDY68bL+!sT{*B0d@zfJj_nJra=UhnsQ!4oh{+>pdE&Nlc zk@~0|cjq|P$|tIl6$iTsYiJU zZM;m4n6)9K7V#yjRBSXT^PVZ+)r+Umvr!sEIsvHzX+BanQXkSnB!1S)_gqevJ(XIx z9&svlU~C!nAlmvm)nj}clq+aNxfj>1NGp*J;P*FagIGv=#oZ`(6`u9d26YVAN0FYu z`!n_H=Wsob3K&Bp-mT#})oW1i0m>*p!S!;qS4GEgpK+~Ms;EsA(ElphtGLmJc{C#a z8)KP=ltBMJO>w1`s+0(x`)Pr&O}z+M@%>u01MS9;8fE?wE+amLcfX+lSuRFHilC_S zB$ACP6qTalCzJuWszeiw<61AyqzpYuS)L!4Euh3DUjxotweoyuLb3}tiT zj~MUGv<7fZ;`x>0G|U-bg8O$dt|82SM%HlxJuiMty~sbvu##|MSj|f6yHisVb_}<& zBoUG?|R)QJBZD#6~Fk`qG=}O!)T;@n>KVbQL^cQd{Pr^U-9}FK{e}wdG z2}TU3^2B3p2R`vw%M-&1Jp)cU~r+NOA+|&=0dX&dK=x3Pc5%Zs5 zTH<@Ys+*|;X;4198+gPijrqHohQ&41CGQzm87CQM7&m|y%J(I{te5!8c%8)f(I>{u z@_Ar9El(0>xvY-6<#QtM8IQ|TWa>#JGWC1R(Gy7TAia!n_bGn>EyN|R|H!~~IiEW) zCmS(0-6Z6kGVU||mCdQd|4&h~91q6Cv}n*ni6+YDnrXwJ3yfdlE8rql0cT&A_{K1> z%efl_&Uw*Z812r-HGo7&i|~6R_(L!F!{-6d2L1UraK&@r;)s~iu1XG9AL!<<$hQ%& z;yMo@Ek)a7xK^TFel}N1ps!rtiI@Y_2O2UnT}U`~p^T4W$_VBd{1bRNAGE@>w?Ldk zUff5Otj-sGnBy(9SUg8Tyl>@uIqvu2om`LXANP|J#x+M;tlID#W0d3LG3rUqPw&(l zqIc>KNdKj}K(9QPJdb4wuwF2OwwWK7CxbVa(YT~{=F>(3zt(v6)XzZsVbJ{{Tu(r1 zlsp{#n|ZiQj6(irgO?lp+~Da3Zz#J;9J_{BJ9Bt72@I53XscqZY%u+VXi@vek>Ww0?|ByrKeL&n8X`i-&P z$+_h@;kh#AglSM;k3f5!2CeDzDft@uy|F|S++Xe+j~#d{$B1Xc?N}T=;aO78}T8!(bv=@(6JymJajq)a*K&qiCGJ%Uh5TOF(mjGQt z_t8W2ZTca-LVu(;=v~numWfNmO$t^?L^(6{KH=vn$XYW))(7ENLWYKl2ys1thH}et=_U)ccNA?H8u4MqN!)6emM0Eu1`%JntBrL;9v2F+V!yO7u>S^ z_v-vtdx&2B~C6D<9IUnovBB$ur z%jrcCz^s2nTqG{xXW|O6UtA|{lK1kjo|E5weEPn0cP^br=hI7c0sV?TLKo6SbTMZ6 zQsC!h^ijH;K1RQ$+v&4(2Yrt2#60~ceSyA6U!s4YFVk=6xAb51dwN`4Nq<1?uZd64 ze~YW=b@561qqv&h5c}y*SgGEmx9Fg_j{YpJr@w&f{Z-sZhs3q?H}vyuVBI^AR^A22 zJ1qW=isJ8SO57p{^6009B5o6^_zZnln1or}E-d1A;tgRHpA|OoU*b>V4q+Fc6As}N zE^()Di`T>-#pi`bRER%|FNjLoCoZc#1n6@L|zA|dV*HR2m0N#}_BMM@kHX?m4@ zDgIl$MZ4$;dJ+=wztPil27OOt=yckrIO$~I_86T)r_y%Nzyu`pljtmZR@9P(He(+WC=tSB~TWBk7!x(O$8|hQ@cfgU) z10%jlchT4AZu&?1I^7FeouqHj{qz8+>zni-DEC|RFsS!C^a%YkJx2dR|B89JTw>ry zFgqUy*SHKAcqUx|O#Ct31YG+OFz_ezAHdHm=?9?CAJQ}Q@4(Sd($DA~Q0&!oE?r8` z(KtO1UULcUryc0kHFQ0&6;NkLNC+@29;FxPaXN>-4}51x{FGh&0eBx*-lZx*jO-`;^Gn$$#Y>Fo*d-<++)28I4dCBWKXRDr7VY1kMB9_*##Clfj2rhDt1OQ?DG^rIrcWW4l~ zLiZ810>(=xX=Hq?_db^hqE%0Gaxl5nXccoa-rF)+(cBsyY?-V);@zYe-hubt=KB;1 z)g@aeZNp>95s4%iAbu?0zkhjV`=rP$`bMP?!2nwD0b;=>MbpIo)=V-vxF0jtI)Z5)0lExsYQ4`c0suty z%tBhHmxE}4*`g^qImW0~SUY;ZnlvTj_p4b`Y}2AeKqYR(H7u|DGs`n0W0U4#;K&#w zw?afJ=`(9O1>;dHClCO3B-aQ zV+=JjVi+_;^-3A+64pArO3)2qxTQHt@Yp5R>9_`Zkhqo^>^0`6 z<~{PvNj}LBPyiG(O>e0WP(Tu>(cFyEX<3?quOkJp!svTuj(0{SGf2MnJu=Q{t?oTC z9=(rQF2z*N975-NleB&m7>q^QpmY7jLzT3Hd6inXTwb}1R&+VP+MY3ZSE|IZa{o*9 z&pB81|I3ZdnD2)F<;IQ3kv=bWhhLksDX{dZb5? 
z4#5ZWEF3+L!w+&Zk@tBbAL6Ez?;wj;eu)bs)oU_&jh5w)SuZNDRG zf0=0BtuRDfL6k>3odF~~>q42XQ;;5mTWBZJBSbxDt9Kt!AIkOP`Cu>6`~uMelq+0A zG=zRFLOsK{UySRL2+_zzL`!cVTDF;JIr?Ml^vj?s|jhq(P)ZL?`3A_k5yL(7)4dNRJbp(MfdXMM#ejofU!8 zXeZKRMCYQN^A;oBMRWn4e+1<|g1Rp(AiYX-QIhE5Q;06Po9I%s^-+xHV|aE2@_&38 z(I;@d>RF;sBJD@IrkCj214P%25nb7lDYQJaarh3mKR{5uDU{wYHA*dWnAqt1WDyT|eDyEhX(afs-tYlxmk-QPo9-@l9K zhxJHz5o=~bei ztw(wcHl1dopSKhJ;#Q(xy-W0;cM|;uefbUY{O$^(-;Wag;Sr+$zMWv+Lc;GizJ&BL z(Vw0ndh;QogPrgqqOHHYMsx_j->HI26LU}ufTACRjP8PU{UH)2+*@#Ot0iH-orLo) z67G5u9$1np@h*UO;fqMb7L%y%B$1dTk&KW?okSwtOCoa>i8}Zev!f*H@w;IkiALlD zM-fejNHjk~q6K+cSCVLZnFMG{bUaHUe~?7i%_MpvBzjLF(cesB;7cUtUqNC4$`uME zhJHO;o-@2Ft3>RV>>KsFV$C0+5 zO=3qo(hDSZPLi0|M`9Q1*>xw0lOiPc+(2S4-k)X!qJt64#;3^>}x~3nXs5h{R1VleiguxEXc-J@Vgz`fo+M zpT2^`XYlOyb`qa`ip1x3lDP935}!x?UwnuJXh!_QArfC%4Ay=ZiLb6F@iolDKccR? zBP71wN#dSS689oaBHi~miTevA4kSrDfcy`>K;mIM|JF$)zK#3uEFUR6`-w3UPuWQP8}fY*<9+6A65qd*#1ByC4^JWS>10-HXJwLmO#Lt^ayoz$aMBTr__1CwP_{~Wqe)}$o|LP_2 zd({0J^8EK>B>tEr@don!X(x#{_mOz(H4=Y*h{Rvf$3y3nc)OOwJI|0f93fG>hQ!n% zQV2=8j1=`*Qp``0Vny75eIF^#2q~^pNO5l_#e?@1^>DT?BgI=qitl_<{Jlsh6L^S} z;0vUL@jOx>1#6lT-wDSsu8Av1sdwGDpD3t zk}^_H%2Kqk9QCb0-&WpC%CYN7S#2X_%_tJ;TUQ_jYmc)2ASoLwNjW}=H1;9VourJ` zBHd2P3267kyI{v$19ScYNEr(5ZLnol&}R#d3Xj|6bXYAWYtu24{cA=GIlIklQWb$L zFe@M_So#40YY2Q#<$G1NsbO>cn8~HcUOO{8XF(>QmQl_w!lrnkb!IjWqCgWcmZCP) z8!Oe#r6cA|nXG0(%Bw0uhutI zS2u9V)?QWHr`i+PB@0m^uz7Drzg5_sZ!P#tCdhhLlhtD8xkXHf(RZI&HK}L> zFyXnhim;-0dDa_%fZrShxT=s-7Kw2Y{2!N<@LdE0%AkGRVibxQ_I-U|GM@>>I2T4F zfs@NyGe~dMTvc-rNgR9||4>wco&0agLfQWsv7?Z#NmN(GW6?+$6X5rGD>aO>+b|%B zXO%o2C;HY@Ayh?ClTN#0by?64E0CnxZC1!*!gJJ2)ed2^S#1(FjI51ZL~Z_nB^X~n zUA@XR-YSzdV+D+?nP2X#+L_+bvb$wv^@gfd)f=03H%}m~soqqzwt7>`F7e#bubp$w zj-@-!!arQjL05**QW!8Cr2i^Z_I0|_*jIJQxt8?l4YBq>u&cK# z6z|MM#DDq&4plLG)4k1Ww~8HmRHtp1&1p4RDyt*@5b)kyk;pe5zj4ROs>`?Q{J3NqPMyGd>MP_>w?U|+FPceqN1I+Kq`TY`a5zN5RVGg~MBX5_kPY{Q1)xV+i8QMAk(9PH>A9GrKv zaZ?0yJnR$Ko-{FmXvdv?6s@PPqY2BK_f`R_N zUcfjT36V#5oD9!2;JI{p6PRO9cVo&5+9YMZz&a5G&jM=OK!+s=xS8}Y*SvsNhD>&_ z5C*XYfby6naG0ZRoUYDnQox|e#5J1UzeYf)R9^^qP+q%3Uw)2%|Jtq4j2KwVH@JPi zgBKR48AGLJGnfm|Dmlh=*g@Tk|QquFNv530s4|H8IyU$}g%YWj}5(}MQGAG*B- zXb)mM{TbXQ6M*CPCYbszsD_%$qYm8o|b~#2q(a#Qb~rJ>$RO< z7lLyXOe@;V_JplrYtEYM$@PSD`L{2u`RJCd3m2Za>GI44{pKvJzp>~NE zZgSf!j28*ULg*%pW6afO;+ToCLz0`utgY7mus_7owr$~=du{0u%iF$6j<*(wkE#^~ z@o{vx_5tQK3aSs|Isebg1u_BI`4Ulitw?pHM4|X5^n=>%{9Ni7=%bSc9$>`+<+YU( zf*TjtbqPQ%EPw0Wl`r^xM3wVktSitDO1goe7s!4RaQ127tV-20=(6DCUNkXXrWJHB z$cxsty~WS}KrOxth_S#ph&81OYl! ztSj$@wpm%Z8z{_VEfr#DfUNzY0AHYyGi2qWH)EMoQ^8c1=3wif)B%M zZ)?fbB@-?O^^5+JrZIqp88V}pg2H6ZK+Q-4vgRNwMk-4N)p%bvR1AQJEfE)>Sag)q z2wG!S#$zs3gyFJ`#QK3&2VWu<1Fx-J3!oD43Xdf~TM0n2xvmi8M^+TF889?mWe|=? 
zUvG!6vZJpzxOSpHX!pf^(b}*xwBVEti}qEg&R*CvlyMeT-gaBMCGK=22b9~*c9&&` z1%7?jc!j{??5~`)Rjtf zEUNvcc%pbmtSgz{I?|Y5x7%&DxokV^9=q9@8l2ZUFQkgAw|!p`o|cWv*KO~t1WFNh zb|7+>*Nkr3U+{ExG}b2*kx-?_Vk)f{9o?8TsQyY7bZX2TuM7|n7>YcZaR}?+J1}1H zy~*T}>RaEeLKwD+jZP>uT^yNF6|d`BQNCz2*aK0|pf47`MHX^5NO5}nhP(|uE&NqS zqNlmBBW|-s^UcjY35PhLaZ`RCh{+UQkw5mtHh1N`6IQGp&sPQq_H5`oG5wopXH!+8 zr85}Jx71WOcZy5Ao`xkS=A-HMWgD{D4a?d)Hh=Wk6K+3iQ3bE>=p^&wEO5*M3!@89 zEM#GwrNohAj8!bEATMTtsSRc|wtyJsMLh?ef|956LDb_lS1Yc+WW+c^EYWPGd6rCq z;xw_cc)mDT{I+uaw#A1xqB=}#D`43uVVOY$>2-xzT`kK1eLc0u)E!eD4}h37Nx*Kb zBdv>CVZ{k%OD_N{W}XUg&M2ya3N)t507WsDBy_H- zb4v!J6y#a=Hh`QpWQQJ2+MmUGMRU46;dWHD=W^{;4tJtGz5EzYs;{}JKjk@Q`O@*h zm@}M-cw;_$qlSTd=kz+TGFUcGosE2O3rl)YcBHY6vtp)Vke`9o^BEwF~MnzaNlRs=$2%n9FX6 z%b*od3P_dF-avAbT%&cmfkKXjt8Z2z@ChkBfNPhE!#)G(@M^6U<%>SEWhyGujL;UT z9!p6lHKvo7FmbOzES8u?EX6OsH?b%-rk0wPd@$JAk^r?7FMls;(e3f#JCK#bw7%d7 z1u8u(GM8vY8rp$&8VAt@QI9mVr!Y+*5bc#*nx%Zgj0N%uMl2;yf0-EVExRv?)P~tz zoTuB|i$B+I7>-ikko}9&0_iV^x}pJ}?k_0RyIG8sqba_wwGjx$s zUwkE{WV$zGhf9asqeC!Q(p6E|>rZpWe>s2ce@ z(2s)A4-5Ly)068m2hAO~-6lN6H-CQaxx3L5&Z|%cy8^!he&fX2aZ$n5+FY0Md1rWC zn(HBdP;xzh~f_;fJrQkI)6j#zUlm`(VcHNCBE8xx*}mS*4T@orzNeb`XV z#C&@=)f5jlc|-MeHEx?XT3-`wsIK%ow_6-Gb7o*gu63t=YQTOfnG)cn}xlVjk>;WVI>{B8TfwkPo_5L0FX`7{(y=g58(bsi}K& zwcyk(#3!N!6S~bUIF8nBlgYtA)=9iFgjO8fbK!7En=uA0#rK(maG{A~Fo=yuYXYJc zWGS{PTU0w1AeJduvgA86Dp$A_CIAJi8b2(bBG?eO{m)elFBRjY(Ex)YVuuhb6$wpf z(U!Sx*lEf+awrV#&p=R*37JeVP4|Rl9urJn_?f_C4@BG1PnDb4J)YX3 ziR;J5uiddA^tBQNo+6Bk=EIEHu zN2-7I=3SkgyEd=xPsO9B`2+P!$IrZEXy}qN$CuV8dRA>cZD7?!o7w?m=BxN~C2*fT zgHIMJRaJGUx{jiIGg(7P04|!O1a<&!rKu*cL6@W@*3^w$$O{(_a}Lg0o=Ym~dR?50 z->EEFl{uQB{{vMtMZK*&FC-HM3P=N%)5f_==$g7vVQ0yUOfF*;$8Y^s@f_h;E*xhS z4lPuUExuTML3!t}`N=1F{3>?nz@ZNr(S>!dk-k>&G}gx=;b4W^4!3(rnr=)%9>DT4 zy|kAk1L-u=B?H|V!>-KS5hbA5@%oRzBSYwh7zwRoo}TkV2oNQGKNFO~gC=!Dzt#0$ z3;bXb3p3*1?6G;d>u*W7#2m%H{9SWTjdQ`OTTcG-@aDcqU2VQKKDcAqJ4!MDR{Fdz zku8n}Q;ng8ed6|1?=g*yYvR%Io|ZKOX`U-VBal@W09Weh?1CemjD{=RW%i8*8+d!Q z9k!$WT*B?xKZ@JkbY$-!qxqCG*4L`M=! 
z&f2(mB2&}Z-X85*-VzA3Ebof8w|CZLCKhizYe_UKlxf7qayKPkHm*4$0fMk?x;Ib>2Wy#K0^ZM#ZTvgfIhGY9c4C;$O}Vp zfVjlL2Y4f+EqEk~0`t2TmwUD4lBQBw&OJ!7VNg8}~U;pa1K%i}P z|Gqsx-g4pcL}K}cTXr@qpBNmRSl)2>T;=t?iBGOs^T~-m9*>aYiC{cVL@Rg<>jT?O zO1NFi%00|CEPj*}VyRU_BH?aBA2I?;Qe}C049GE4i-ua%1ge_Wn~j>;CaDQFTCH4E z41YvNUIaU>kI0FT5p+GPy`Q#oASeEhvChji@{j3Wdo}ewC0;K+ce40) z@m|p;))nuO9Y#KlUbcZ=avUils5X;MC8`2G=yT;ZjvhCV$pg(J3Aw8d90q=R&E&;n zk)~YTN+^+K+Rjl}Fh@y11y*6Xs7m{roDYejY&JuuxIAAqDU3}Pn(J#8UBRUWYnj(# z5oa>(Fm0_oC6n&$?1*=3wAGzj5&hJ=O=r77J8PJ-s=OzA{4R?@SytPRb{4M;u%!Ua zF`pfmc=)ZO_%e6}iK1(4#YV49z!+=D0tp93Q_zhT@55_@oT3QrM9e%S; z4zQK-au%RpoXhZy?hwIaJsSiOo+-pcm|?}#C!RsBWrrTmJQqj`py%;7?-hrNPwd$v z`uAusFFqxDIkJhzx)*<+$5>Swo#7AAoDXiiwT9R!fN>c?K_Avj2Roa0FZdLiWS`kv zVhHV3&?OJLoi;0sbOx)E7+-;5D$I{1w_=eIXeIpy+N46=AZ-4XnV1J62XifrT}hfS zYLE*%TOTmF$SG^$i%!Wsti^VBp>HypD%Vn#31{2(oUpU9CA!^ej%Ab+4}ay@^%exx z7&vjV#EGvJ91dP^%dInErwwF)egt{0*fb;E0tO~TORxC_1DCU5N#zVIXJkql@IvN5 zSQXz<93}Y5SkNeQN!<|>fTz%10XS9`S;*nsvaBR~#sR;?0QTf$)JJyj7WVzcmqcOz z+ag%}gYr6SCO?V0>m8dr(vNN^tQ50umaaSElYV6o-iqW%0ZUdF%-E zi5GB3yeUEUhi6_Z#TR3-U=EspR4gm5-m?=tQ|3YYsuTAvda58T&#ZL91Y{sb!ezxc zrP>S_B3Kz@gI=+}q>O1MXXoPgOn00cIKoRF&v)bOd`yI#5spcX`cP039}7J)GdqC5 z@33A#sN?LCX0dRB&q~(Ev#*uWjm&i!)ej2v`}5w6U%as9IrHvIUw--0-R7O*SBD=4 zJrorMVTVhN@hiNR+gYx|6FcdCsL~9C;0T(=FreQI6pQF@<#V9IYz$)`mR?%oND05V zfPrArFU51rc_n(WU~qK&N(mkyL|Cs9Dt=Mfb)49u>{614U&Y{F#Z~ecMap8AVQpc* z;l_faHXVbMQYMWtmH^f)azXqm$t#+-Fr{-wUM3heOwL9Yo^IV&ILk)W!+K3=y+|lZ zz*RTW-#=34GOnli2G7{K^^8HEaTVwF?^)g2x_VE)ab0ossf!k!di9Fs*YNck^iq?# zmPkB}^XeXtdO4V_T;h_JI7GY14zp>F#|8Q+sUIwI17l3mN^6>(Q(`Grcln+dKg|+8 zrzu+xZ=DvtnBH}-QYQ0HpJ?exIGu@}mQS4g`m!B^aTcJj5Fd?Z+rpu?T!aO%?9lp_ z*7ZYKIku6hE0kYKI5p9R2h&LgQi%)1njof|1oLo+CCpGUFa@U#N%QiIm%vQCWy2aW zgC+5$-0`OwXt~#FI9NB!B6}pHe6JRBg;fo115oyONBt4ND*mkgvp|!mG zq|GbX5~#uKJPETT{j}goB9PR8+O%XDfmkVxa)?imi>jE09WTjcl4Y%y7f;O+hDVeD z3|R_gPSY|YZy7wb03`Gejlsjg7k2mN^?sP3fEoJZ`;^i(N&g&ZV1aKjoKZ86-qHZ? 
zg)QFEaLb-Imu>Hlc8>>ABlwfBFt_yFb|+jWi|wj1P&vwH+&VM|0~ptj`L>+*1L1 zR1a`skZvke3hK%?HY5{4Kh9kmEX`e44@@<^;1n@PGf@yHq(ZO_DAf&6A`J~vYE~d$ zUP2HRjj;|My&NA`$2J%yBG9~!PFiW^0lH<{u=N`r)R{i5T;9MfDKLuRw!xGuSl^%R z8(ol$EhjumJA7_ysIH?bQ6Ke~?TPl0_VlVnO%aHdJ|}k@TE9{QoU68x|C5!M0Ji zn$k1!a-}dmaz=fT(#1ppmH`)NP;A=sg!r?vA4sr&=T3AO?XLmsPs13h=`#fn<~&^K zL5y*=sGc4Jm=nYu@H&GZfN2|J@X5#gqKdO&07me54f8aC;0TRHAnL&=&$)R783@iW z%s?wWYA$^Mb?ogd(;D`}4JX;w}Ay{oyeFw8Zz+=KDu&sYQL_&7$a8 z-J1zmt$|GMnr<;w^z{n${pSM&YB5zP(8X7lTWdI|;8ly1*tZ>@_wlYQrr#6)a2T4iIsQ5qc7oVEAY0nv79>3+*u`i#& zy2BGnKZd&Y@VyxBlbGiWX6QzY)d_h?m&d_`3~7<&aqOtTP_h0_YcK|8vOKQ629+R# zl(G>uKSLfbm6tgn7&1vrM znJ)2g@e5)r8rS^SYtUCa-F6iDwGPTWx`MbEpn{xA{FHJL^cTY>Wf*TDu9hssgmGHc2W_lLU`k>P@fDB-A*1BI`CwQ%xIwTMdb?gPf$ znPtpf@$-AdHa+T2z?axT&&}N*RLn?;A#<#e#6I1a#248gKifr+PU8M3&+L3x*@^zN z;bd0C)#N@k{`oPk*3Ob%Nl#Hs^}zVekt*kkqfefPaB;efsCqA;}#NIqc1Q)*5dk2d)b zE5YYj;XGQDB{NJjdm#(O$3)@nMJRx=?^PCxPK;fp<#X9PBvG((OGO*>BV(BhwKfV+5{wGiC0ko}NEOYJ4_xWh$Or5Xg3i=CwAsE1KHd zeEr8XdzaX2R;D^f8hw?GOMB1wy;2d3y4~>*3~(RY+K@Bbo#t&;r_B^?E#!({kNE@D z?ekNK?xt!p4tM7Hge@PU$?FJg&e7q5Czs8nqTztwTVXXz8I)(V4d@EO$FS;PS;c%x zn+2Zne6UwjA0JF+TBwP(=H<(Z@pgwJi^<(5#@ujGE3lnaxc1pa%fxipf-s9!?DS-Y z1>1v}T?zf~Q3U(4q|Su>0vrZC9nWFFLmG4}R<98v$omE?P`%4Tma?FxXCv-1cV49K z(-VoV#0+nqxYc4U{(Ra&XMtJpkjGu-q2qZ+OgHBJAz)>SeJF{l@XQ#|296Dl;eajd z)q$)IksmVWGzR0DF6pxmWp-YTmrC?QMh*S65=`EjvZ%#qbsIjDlId6TdVa|fYprWr zUY9vOvv6Z)DAc)eA#UrIx7D@A9B;GVvC_TG>oNxA@@{ii!XwG}#N&efYc@Wy*L)hE zVBI!(u@w!WqF8rf=H@MDB|HE=FzHZ(nX3YC9F=-CC)~_psoK8W`24@4?gS-{kHRBKt zn<3zUx)gIrlNPYa3bt@23M}|KmofnYGQyUr8?y*?vZt&FjN_%rE0va_fh00a4zmTm zOly+XJd8+Cm|2gWiFAn54T^ZODehMP-4dw1S*?gSCL)aujp6puS_AtQ9oy&@U+`yI zud+Ivz3Db4-J|BhYlPLkl zY#<-bYZC!Xp_$8aFeDo!FcnN-2!N2w1fvlz!u@j<#Ymgk!oW5TMg>_~+R>!wg{2t+ zgEd|lnl6mhM{gI62{uK{t^^_j3mu>X$>U@l+6G7&O=&^!Bcvf#FMH&EH*BsES9)85 z*|wW+p4hTwbh|Ya4%?I)&6Y%F_h@maxP3uv&&qXR;}B-BEKybQZxF9}9Zl2mqN7lG z+`2W#4)itVYN~^Nt64_gGVkwS4_w)Rek^SQM289}%8n^8CDtM_UM;4LH>j1?0oc_mYpdq`@g7Rt{7L{ zB!plOxg$|Vl;_k^IHb1a%q(rBX}K2-kkH-ma&sc@Q3K3mz!~o~3x)GR`00O9-?Lz0 zO*+3b}z$LB^?G%nlLx%d2e8JiVSybC%g6+u>}%`y){UdxWNAcE{A z0t+kmAwV7Gmh@t}dB$MS+I~Q`>&k^b6!d1qzSc~(KQJ*7AFSST;%>1$Q|)mSVZ7r- ze8bA(oy=<&;LL)bDZhYz9HN^Gm2Ous;H`9p+#!yiD_QQ6%wr@&B0KXDAh`(?rx?3+ zcxsLVW+$O(fFZ#30lOU2q;$F&C3#*7jioHw)bWTAF#|D(UDA4;eAQL48Ig@ZIyT{8 ztK$MAdhVLR`4irda+@AkdH9^}VR2RQ9FLvPq*bO~#CcXv%kf3&fr29%@yqzik`9w_ zVeMc~M%nljMKVjDcIX6CY)a1sCJWOg?{PK8#IvDiloK#3Bk#VS&dfMez&>KxqEQ!T z(eg+|k!KmTPp`e@6QCkcOWLp$YiR;@1`$#`QNM6?wloRvzNk8b$&fO9_&~#grU=XM zz<&fbu^r^QQt!sTJB~QO#&BaLt#V*z3q)c}hUPJ2Fry!AF)%_vQl&hAGP7Qoa*@Lj zUbP@|UR|0>8QCqwcwZ`j8rwcN-ro0~R~SRsN~ zS&R1eh}U2G<27HYotU^+9Q;x7m0x5Y1&*(H1eFU~EAjhv$PyG+rBSgvN(Floq7gd~%v>J2ut~9kkXa{z0IJucNum(jrZ>Q|D#9Gf+X>agYMs7i z{)OGmypV_oYnCOqZrHst(;HA#dD*?hfoJ(8A@M>Wxbc|conq5+k3;c=^t;28lXYs=6$<% zeoHLYGQZZi4vdXtvm;{z{o{Nc2XcWXM_~8)GseQYesNmYpQ)QcjaHzWm2~}SRSXQi zBvK)C*=%-ApE<$0{)}Q0@P=A6Z6d&OWfV0eyLq#V@`}w0mQ>Rb`oVW4N9R&wMH>LC{S25;h8k=G^ra&1}{7fPx zmWCPErB~)?IlNaTtKuxuk|k%k#MX@Lh4v$IPP3;OTn&%q8Zm7ufcZ&sa>>Z|`s*v| zyF=hjL&tdQz2Z|=sJoI^H*gH1paJlue?%2MY(MJkXlaVVgIHRj8`>Z*wZomJVh#Z3{P9m2=vub0piD zN({8ZBkHh>JN+&{PS6QfI#hGCuC1!7ucaDxCzsdZMIZvkgZVf^xej_iVw@sDKEldj z$7e4LNv+KZ1{LLl5tz<`unVY)A6$ml95x~Xo>JM<=Y^G5UZA??D#juXw06KxMw_Be z85t@km5G334R8pLdPI8w1p==)$VpD|QOiTcWCOcS7*fgakqW;&0;fl8_Sac!2HL8& zY?-+E=3L5?f(J`frn~Ay^@7+XRi7Cxz6MDaBNI5c<1O(#&I>u}n$E@?9P9%k;ALE* zH7gXnvcvXPTGNlnCT(rJvU8li7S63rne-fKHG-m;A@k0Ao-=fadAe4FFDxSEQL{D{+2WrT^Dfggn zF8Wl#<&ydaM;}N$mVRajOzD^=j&Q(n!bbe4(aEs!TJx)DnWj4$G+y%y<46j2B=d9V zlUR0C_{M>ya3m{mBm)(smt3Fnx@j*m9F~WU*?aBk(@tM~?I|m_kAFd|J3J*WDPE3| 
zd|e#JW2wNOzhNv+y6UKYWX!RQ>vWXIb@H5Y*roz%lCQZoY+46`0)VV?@YM+0lm`%% z#noX5?6PC4Ea#SGr4EFd&u7@7gag8$3GKh|;d?)F#=Z;hefXjagm~+od)_KeJ;b@A zSg(JNep~4a(uarm5)RueaaR~*K>YB#F^NeuacS;tR(r$cFVO+EBkHetSn0CYvVSpI zi!jPTw&`Rr2v_jf{Qd2R8zss)Stu!Q}|ejShsDs zc+WQUs|vo?SKxcCrL_f5CdK>Kg8{eGVT1joFi?xn(m_>HRG$@`3F{XQmr^-wiFb>E zd1BtdNicuRdgV>Y0Dy-5A>R06RV{fKNfD^ngg&!6b9#4hvDdBXw zdS2SAc$Gl1DuRnQJ+C^Q4lCl4aJJH$4Hf?_u-lVpON)NLvMVV1(`^aQZZ;ST8pjy4 z^ImEVKk89Ers!4}%lp%N_Q3s&vg~{Qy&V5>1rH8A2qS9Fg^)l#IkGUn*2!Z`VF^i8 z`!Exj0iXsW9mb9EV|y~+ndNC-a76I=8gNbWC1T+j=+5wf6N)hj*vE>XO??vB_Smzn zZ0M1y8AJCem2P*XqCB9Dy!b?LSFremHgM4s%6coad{7Q$L|UvC4`avF!h$2{uk_fV zi-SrEE!dn1fr&fJra_6YvlKh-FHoH23G`P#BB1P;4k-n#_Gg0r>O{6G*5oR?6-zzt zKqL@x9jjl_m+aISJ%-a}ocj4xu+>=12IT3r9I%Wq$lZ2y?v5D-08HwuDKTaHf$9@er=CE9~8lKynX>yA%OLxoW%p%JE{C^D+Ysw@sXQ+#4E}j}!IMr_#f+Wsve&E>_+Wtr zcmPwj_;A>z5rS9EZ}5U8pYlBXj1O={1ivI@R+|jAHZ62(b)qom)}oN>%JxVf?OuG< zu#eOymCE50EZFo zb+jxpdW{brW_zho0i!cxA4WF4Fe6-rIaW_F7tZupyKo}f8&_YQxGHh=ejJ9j(|?x# zjW_UTKlhVPll=_REe`|(4xB)%AE{JVIwD|bDt*6ESf__l z8jsDUkI8D?Z;Y#_%YSy@jW+^k`@4GN2)Vbr5JU4cU@;?ps7&?}_7B_w5BfS@&e6g}{&mIoJ zfmDPT9Ps=Tom=VQIfEyGvpO0EGy&v`c7fTi94k;P>1jq;eA$n z0}gI7o7^mayjSh$6KcSJh$g@R02bLNW^ilC;x@Z2=7tYnp_Kzb_((slmOIE-K1*|~ ztWMRbTAfF9QjxRh6E@zpWdjjb`-KCs#=;?IQ?>G;t{_HuHPCM?F~-TTd_Zi(VT6vB zoxGYVyMM1!SX1wJBC9x0KbrO3_8CXBs%IKUvp!9y=VXRs4F_{QkM7<3sHnf@_#Hb&uf>`LpO1P1=+8wd`p<$d#Ty#p ze(gYP>_jQWP-H;|3eJ0gAj+#WP=lifG`!S=F+Q%V!+PW{8y9Kq(Vz(GnB1`ne8Pps~Rh@(a2Paak3-GuMJDSU@h7hC&D1IAG1e!qC_G zXdfkF?2f$@;2kBP1uAC_20S%c)#@!I4+V!e;$+l(aues)gLs2MgT|fxc~bdnf@}# zqg?e)k4-pI&~0vP`_B{2C+L%cn?rD{v~T76p9~7Q_rR5LI!zST*GRO!%J{ zO%_KmD|$Fcp=D++_UxcOlmX|})+^j%61lKumKK&AxR+#Tx$bTlsO7#b@shZxCR-Kt zRru^GK03d!^h0_68Z{91J1PPW>#|`4g5kc>??4+-#Btkzt5yn#WTDE2C_7ug7GS$F zpWcY)`ko_s#y4Ya-tV!iIL|^JX3Zi?gPrvPtPs52oWld49kW_w{GL9e@coL~XB7pl z8@gl15v_7#+%m8DSe&?NCTnh-Z+OXAZWOW$j^9yJd=j}8Cl^5TSx;ie;$1p+V zE31iXWN*OQA)(sze!$2R1p43rFlocF@!9^6Z)eno0|?lb6JxtLAP9i~o(8smLCD~X zp|WF-o{X#%yiu$CxbwR5P)j}%eR{)gNIJkd>{8iX3cD;`U)RcK6t&jrer7}}lm}uC zu8T$2Ei_!t3)e+s>jq~AWfmtRjeV(by00-}IHJ!li_=Uc5-GzWovNuxY4MsW@wr3y zs4q!fB1x|nDw8$UFozl1cFDSuO2i{<0fey_Y=C1eF((Ww$n&L>Q8{$Hl#x-CH;m25 zjaYFb&aBO-QJ(c6%p8DaYD59ljH$;B-@(`{pD+{$WeE!uCNm;D%$5d3aR`ek$K^^Y z{ZSNo{W}Kbfl1{boZ-_1*QFI^x^<#PC&`_j%;{sp4yUXuu@9GL)D89M% z?XCF#@}{Mo)h0~bR*TDS*?}f8e5`HCTHs$u1PbhmC>ekFJQD5Dj?zlsf>j>g8B7+| zO*`g6BNUuc0}+}jfzM-y2gPo;+2!0ZUux0{A)3zKjQ28(3W7}a>{@UFkr8p$qZI*t z@pjHR3bIrpW6-z{add{SY!hB#!*qgAO*`_qG^>VSTYPMrCPeX_6%lki&G2N|u)+ca zu!u0^U=2rQYzAO#&a($4$NLnmn0!vW=J(tgzjEE>7k%}s7hS&YFpi3c^lQjg@su2t-#9=&Q+XOKzPato7Nn=bV*ldQ$sq5ry=+`jTL+Sa`b$~ zLIq|+7088Q1;gijEITyi9#|PX%HMz7nm%@jQMVaN*W!UFFhK8az8Fq*sEF{`m<1s$Vxpk;*s4f$0iMIs( z9+%AmEhAduDmVf{RlMMNFwST}MyN{m5nNVAS8;bC@Mt``5})re@m3try~X0OV6RhQ z+3YH;IBO80At;AxkEje4SRldhr+P{?agDGH0q^*8I`T+8{fkPPZ-_TNgjw1w3t9{t z79;(56tOO>NJg?F()B@!HOf-pN6<>X0AKnh;<+}dGxtk@yg@aRS1 z!GWO3eiR%tGP?#Wdy79vVHRST;(?7&ycj>ZOAshzoouLM7Od?z!FEwAc8@@K;5cC8 zV@5%QzfyYb>67mx~#Q#$ZW#l zmaY>$u0U-^%{%Vk63ioPq=*$@8>yRY57_YSE~R8A&1vS+KeKA~Xt~XFvu-*?x7Pp| zi(z9^sUYor*eZl^8JwNZJ1fPjz~;eWTR?0%SX@EKOq%+~R`_hSH82+)EW&BXN@-I3 zR`deKHhh7of#!=l3hAcCaL8`QsVVcD=BH{x4dDiR(2kYc1O1$L^h+(!2>>qi!Qv}V zW-!AfnnM4K2xFXntM7huf{6GiUKhSyh~>j>H^@wL2QUiZ?7mVpAS-Oz`tbEIKF$9i z^*|u8!}d>AGdjjpuG#pO&Z-3cMo!hHEm3oHjI44@eB3R2613Xck+4du37Ds!iets# za_>R@lYA_x1cw*_Gajojeg~PdyD*dWlZ8nFjtsQ?K4<+3F9T!*w*_xUxpFN^o z8=ctd+}K^?bk=k?Hg)6US@4aM4}Y-~i-yj1*s%&F3rj!=MG@S~ z9DzknDOvJWJDAH~=8mz>;=vChh^3lStfM{}t?!8GSLMFyOs1OC)9z$feSLS0+g;OL zU*DBv`o#WSYQnm^8go-YHx^abD3TZWTD z_;x07Lz0!m4b`fL#Zc_p<4jf 
[GIT binary patch data: base85-encoded contents of binary files from the diffstat; not human-readable]
z664%lAC^hFm$Xvk$+8qGmB713NgCLiJlIf~XD+N+WvF?&rW<;~-)WSk`5`wb)hI*r zpWR@USksUT$kz0Dz?zOotRk2h8)C3Bz{b&S%(!z&k88BY<$XFDJRcUgtWC@y8}PfD z>rlmMI-plv44HDzcJ-Mix4T&XYJA9c`BZoYYR!s5_1Sg-7r#by8}2Wl%AKs|o>U^cAZ?Hyl0d zT&FG%I+NjWSyjj?6#Y5{R;OmNc%~~z8e2f9l5?dHuZLc6C18Kl4M7UlT#`G*WmJ@w zE0ij8(&Jg2E8?Pzj=tapA3rJsr1u?_A<|opJo~}VixBDOJU-hVa_fsnqtTvdcanms zg1=Bky$b4vV9|UHEZ`=E<93_}BXf<~vb(bG#z}qN+f)Pp?dcnZ1NU+1zdeK>IV2sp zWXHtM$FC-PH{@6RZD4*y4{Q@G&q1U^P$(mrCLd_RDr3~LZ z-@Utdb@HD0jlZ+&otm}!lcgp6vf7K7-+d!~Hhw$LVLx0Ai@bH|OUSTTpocd)N`pxS zl@Qu3P;=w226tvM?Xo^5d5zWDj$>2GORm!96zSJAQ-@Flw;5z4 z7#|bQ83?106%MiNfu`z1ysn`_A!E3U*sj=Qjo8Jso6EMv=^W*`_J<08 z`^mz~+ULqFrQ`Sw>D>$1I=%hMaOY~Iy+3z4dpg(O9$D=SU$NtKmiwUaztE@tRyCRL zO2#>SGS5Yjd*V!*yk!Qs)gU{^l$@6I4H&Wn+$ebZ1jja@cRW{+Sc48=9nw(0Q~{B4 z>JxJ1sZR)K%oSN0I4rm-0IL;SrjIDoP`yiBk>m1Y!g*eyHD4YFeCnakeirxD7PoHQ znw&ncT)bwuc*W%6B%WHmZI%2gma0=-G17zaAy}#?chSNRk3^H=J z1Sk_o*vAQ)x|1Y-OG0&pNY-Eh1ITsg;5peA&#;~jkc4(J^~Y7>Nwr=M`bKbRc!WI; zIFmczfFRM8Hl?%(-_&U_gFBk#SIK_$WYFQ6!NViNd#8tHzWL2F$B!S;a9ihUG*%kk zb#?6k{-aI#T?dZSy`r0JXruU1lqPf1-5oshvQ1`;#Q-fc<-n>nbbvbO2~!^}%ejC8 z9FZ|2!MvCV`H^W+FZ8%ing;Q6VII=gFpKGvF1jR!Lk`)%-E@B%?{iSda6*i@i)%q) zID}|g2UL#G4Is?U82A9!1JBBV*$69{IgCkv*%e&-z0>c1W5=)Uy!C~zdCd!N-TCs4 zH{Ep8O{cP_fB%O+{C!xqaKc@@sBNKn9)0M^>HyqL@}ihig+b^2)giHA+CrKgvFKHu zJZF>`^DzTPQKNew@F28{Of6aAMK-3fb{hGsmu6mb_#1VbgEGWpfY%}??2MtX9UepN zWFocmIA9x!SZxvLC70|rM7lCWk5_5=iWzb| zE)?N!9e3x z^99m-X_-Cv+oE1PF}`lGxsuchy+20Mi*tig?5t`hC7PPdcE_A+}ye2o|R3 zcWQU3Jx5I{FbHDU?e-vP(tRz2t1HdqNVg-_?QBQSNon}t-M*mYV8UEw@qTYlzuRR( zj}G26y=68vdw3J+@y`Ajr$c{S@}NKKMLl<4y>w#dFzH+)WJ@x14Q;;oumA1L!mu(<9M%TO10ePS8n@h!V{ zZGqpn-FfG2lBwZIr^9_$2`Z9`g> zb?nBeMJjYpeG_iY(?-=&-Op)KSI%vyx%oN?a$J!c{}VU7_)oy+fZt*+RfON+&xv3k zdQLUmolA7mtyNZCrHK)fB5HwG1un&!XEL!E89jokQ@*C`fS_h$P;m&$aDn9tw#&l& zJR?@I?77W4%@=uZaoe>kI8|GHytgx)9qFCEyf;=FAB!&?m`>v3#pUJArEB+pA~TqY zX2}w4J86Y&GdHmeU$Uw8Z`3B=h`tMU%=F|0Igxt1!w$JkY?OA)GMxz6L5Uvf8T;LUAl|!9wssS?!sCsks6$Q%u+}nYTk4!iu0oo1e37Taf+`#BBA zCZ4hP(5Vd_u-WY(XGE{|1WKTJ^vY^%!|dote{VJo>2C6Tn4KrDkR&!=H^w@B1@K*9 z74I^dc74+n9J9LU6H*{ArMQAGL`C6)rFbv+sG~U3z3slY%5eX|aPPwS=xpAxyDH=3 z3%y}{bo5ANhAcQEb4g|Yv8ias%+X6OKUwKWEuXt`@z}ukyEcq<_fBjemy(ITp79O% z?#@_w=dnuH!1SIghlj7+Gd+9FOZFfAt=qP>$5<{zirn))PV;;2tUH@%N}$N$akC)c zOo?a`O*P9~$ovQXbDf;9Jjn-6rSagxJa!^wSXRV5&eiV6@2mafaqYF&?zr#_(xsDR z97pLm7SW~XH>$azL2_+FiwlF74qe)lPm-n679#s*Lf+yhw@qriz{bXIg^fF)43dMx%q}@nre3<-x(_%gV{| z@?bPNIy>Jrarx#UYD?{?{hQWH^Fim)-!xh$-DHm=cV&H^-kM>WYb;PA*M>^K9XH)u zzE9EerxQq8@Fz~@v<@TdEu&sv#&jJ_3O?qES!Da)kj`? zc7f+m(f8^GTM@40p8Btm5UqmIP?s#}V#ZTcsi0;lR8zjDY#XWf;vgl93Jd#CY@3#? 
zgXxIp=kfd7dgsQbu39Oc*nHqbrDJyN7kIUn#)rsdax7EsjdymP?MRh3@9nRgn5;c^ zY8SpMGf(fs4{HVTK1=9#;EbJ}o|-5P_4h`?XbCU5b|Rc%CWazmz$np;fyQ=X)Qk2= zHkxe>q}OBS2`>6|n0YvyUEDYu@0eZOn7aJzQc75Q>CHD@x$Tyo!gIGQY%R7`_r2|H zVCjYXm$lzALlJA8wjLR_aT=G1qVF`Z^@@${F`I*uiaSqh)6MxmV!F{>UtP7_uKS}G z?9-L8@lh3#JQiLBRrGiADW1N#l@{hQ`Tqp_)t%lZDT zbA{g7ZG(S;KV5rAZmz$wx@)v@=$TQ|i8!Z2v5?u=j@v@|uIs~Gk10eHtALQ=9(&GqqjFUuZYkevS~TO>u$rpSxC)8W=H}Mx3;@ zZ;@%wAio7(#YT#q5c<#p*3jTPE2+8oMxG(hJn(iVoN1+6l%u%Q{WmF1EMWJLV~P9D7D-XF6X2*6so zRF*;L{JpXBNZp|=fFvpx>3A{AYK=z_dR0Nm>O~|dZX|Ji_xh2D>R><4~+6-^;y zptl3iA?Zc6Z$%n}e~;BOOsruL zC=g-GVKDd@uzhk2ZGkA#%F^sK%-6boa>TnoHf5waOc`*4xq_69QZnGuv^rp;!E0ip zne*;|Ym>9;xmHWc-*Kz%zp8H6#EY$m!oQ=|UetPVh_yj+sL?j`?P`3>rnw5Njyk^f z6+d6QUC-CvXru&k^g;t2_2cKKZQtsV45|uf%?{K!jkwi_{H#%OalG}o1ScKW`dW>S zPGc`DKA%Q2kmoQtK}M#(C!gqaEXgT7IW?{!qoZ9XlXRd@(=hcD3i==!uh+dK@be&L zN-3SXJIrv};8-`1E+U@z_ zlFWOk%w}lj!}|^EgKb^y(8V}xS|3dI%ib=`XN)_hyHfD?EtBRQ)7kXnUE|5ebpyIJ zJD&VETl8-@WbG^-TY$eN?VZJISUxOp>;5&(X)d5YtHx>dhJt}0SHLitV;Yn!!EIPc z_13ZMG{?=bn?&FjvQ55Ya6!_RIYV3_>J0&pJvlmT3l{*FOec>*mICIvpF$dO7*y}s zs^pDSI%$7Gx_3cL-~xcv{~n}yPfbC>H-7A^kJX+plD+XgkA3xVNcF}W)8(aox2=k7 zZ@_!fI4%O5of{d4%);FCWPfie;o)AQ`vbT(X0c^fa++avG4>T7_fks^nI<{}a|?1A zWCHn_3l14}d(zhm?SMbbE>~Y$E5rjQT)KVjKo1(j zP@pzr3Vo@XhE%zDXAGE&Q)bGCTU+#^)tUm{U888_9wz1i*_3&T;TmDPgvKEk4XCWZ z(n8xhR%TnUGUuxk9tc}g7m&$;?G^ery&j;UofzNU5~P8`*?y!Rsp0>I{vmTRsDFsj zy`KKDP!|2Xnssc1Qp8e-wAooF>p}Bg@|nftkql-{Ta4BYvgowDgE{mY>!`9{!h!@` z!z>|_v;+yC9X;){qYdZ~!!&^9Oy_0*{~l9ku=X1RjA$Z;42ww7MBiZ1kk^W?@5J{o zyZJ*(Khi?rj|5?Y=)5nPOTocuHNE@)G{&we$_4`8`5!0Ui-MIwB6ikP~V@= zud{&x&h~1ta&kMN0l{iGKhH9GD5&{Pvu~f zoK#l3sY1gC3fSH?PF@~o*sA`gQ+zeEEjtsg&J&)1p}Hhm`HU=Hf_!Ur5hE`!|P}27U9uT^^5PN ztWUIy7s(%=CbqPO;^=FD8R;)DfGDjAkqT=w~nT7t|1n%DWC3@yi*KL=y^6 z>x85DH)(|(Eie%}bjB{>vMS8e9n%q>&HxEzGm+#JE~!F70r>qn+4x`VV*FA+=Ic4o z@Bc5~AAG**`;)_@_4`{Wj)(pGhd5j0{;|jof{^QWb;zSQZX%4jm$0pmA%Y81zzuZ@ z$fn?sO+o8XR8bEu5*}Q!4Q*0Rp^)_821t9hUZx7*6t*e{Q^@1XU84@4J*t98znYy{ z8MU9Ot5)>1z*_%2Kfj7NR!tWhSh8Z#tqjOhIGY4(~c?wzqJ~t$a z-UXG%1XN;%Y=_luaUIOlSAW{c$~&K@Hb4QL2X*gPGg zex|rpsst&-EEttdTzVAmMUhJi+fN12ap?{=i704^V)q)cWS_WMl5RGs1;U6txO`lMz3I8!NpS1 z{ydJ?euCq_zWTA+?Kr#_hi|Jswne}5*s;ft9lP+g+ShCU8`2*hc)%Tp@1hjhSSHat zetC6dYATb)Xlj0Her`i$c4m5VqBK0v-NVtr`;nDSc zN0jd_;0mm;@WVnSrk8yGuZ%XnDv-fuDDYp4-Z?R!8rwc4SZ=1akEI^>|3j@;oi7?1 zxtP;lPxDWEai$t457MiX6CoLowgCpHEm-pw;MmKzYns_XlT84JKB-{0F+UGEYZY@f z!W_MHk+%?M6ngZ;wZ%9pnDfcFyvxAvF4J8yf& zjVz4{`tMm8UO&Gnno?Kea_!^ZgjbDK7u z+dpoEJEJ$Wr#D^y8l?1xg~rj3ApTG;_4lTe0g-!Z3$~NxI@qC^#twn^0b)&7jo0@+uI=n@Hd*VHd zW_@?bV9T2NyOjdoQK(6OL$qd%{f(OUHw)IZ=x?o?g=lK0TK4x3AU1R1wQK9|e;{_? 
z40?4nHaR|2%y-2*JXT-@(yPlUvS>&7qLnv$|58_yO_v%=1+R3OEn@lYU%kaF927?3 zw_0|~8-=}ivBlD7_Y>_Lor?+7k80IylFY|sf3h!=N_2*7F(c)Dc)}F112^I)JB26l zfHz-WPH5&%fRGM2tri&3Qaoh1kgx#~O1+6?@$4u5W$;O(4hkr@H<;L5)E7;33k7tA zzTih}UOsxu(t)XDa_Yd+E$6;`{La0-y?gIG&Zgp%FDmanyRve2Z~4OWwDXH+?>Ttz zp0kS*lP*j9IS%=;E$-a&^uETf%!q@ix))V>;kJV>+?A%%@}e!(glxHAYzh!v=Y5;z z5(Cp-2#USdsKWy>2^KkIBos@yATNT2zhK)ieopNHe6+R+|4r?^cv1fWbr)ZM{)~41 z%$H<8e}(#X{;Gvyrz~g|G6L_sp{tD&YQVj^gS`EE@ObA@O?V1Ymy{L;3^u+})+q!LD~Y z?BHa#~zt$}W zLyW~gP~XqTs&N$y$dijVsf6{`J`KkLr5eq`Ii>Y_C!y+@QF`d7(jQ}aIz=8oUzdG> z(Us)RGo+nI{dlq(7gI9c(H4Tk)e$$IEm5o)F^W+SgfdC;fZ$y6cZXY4=e_xa`0Zt- zu^M^l3D#%GO?0WT4vc^*CsXs-EcZ2iK&yqj#ff^B7*QP>|cXAa$GqE#q)6$TQpTT=PI>mhvln> zU{?uJsJMUEr!?eEM{w)pJa!u6!t1mw`~!AMeGm*WFx2+lSRB~6VKzU{47K^fC0)Py z?Jzn0AL+&Ml@DYS{$4!z)6T3Xz=05wyE zzVxQdCU;+th>_Y;B!>DR4q5Zqc9yY0(HiBJYl_Xs4!)iJjnVy=Qfcm#gDpHA7JQ*olyU5K8_;tszY0UtPiORiAEJu^!wRtas8chii z>7;v_>L4eE&8}{)Hj@@FY-&~r+d6~478T}HQq&SaK_2Te zfF46oAI#aEL1RgjMrsa**jjg{XFhRv?hUpJe=X9gY{NFvNe?Bnt{>d@z zJeyA$(nn=_AG6U*n&ew$k(yni*&^9W7s*!2&*4X$BQ2!!WUE%GDTcgH(S#cu|HL`C zJU#hq^g*?YkCErxL(kYQ^toy}P0rYKSDIX!+_P2M>yqTm0uxRSpzS6b!4vn_wX!`U zCX-PEw0XwK7KucVjH;ZFt=jWxo$w!r!%p%cJDa@6&Uu9lfv` zi$o&rk#=B+Gh8;RjOqd-!)SPivCI9j;UC2T)Cd*V@D8GS0+E+FDvp@WbM7JX>-4cP z8oii9kCOc?kACl?`CLaVWb5Lx(bv@mXnzsdAySCfO=C>y2_jgQPj`EoQ9?oRT9Nje ze65CULKItS#QRNx^q8PB^Wy`D-n zUIQVW(z{dFUdS!hm~ZwqfZwT7#A}Qqo_h6`V1cND8pM+nYrdhdwzcI?9Y_FrLH{9LqX%qWCj37KW8M@ivP_gJiN2y#tEvuKGVQ4(XYIw^91@=>x$w;g@3=RM_tXxYgQ(AiF>>`5 z=Q+!@51~J9_}tyRYg zgU>1xU^68rkg0h}`-}ArX%j&V7}8um*+8ANX-@3aAggmM=?$d4X?WVZOcUe;(ima7 zhhIXe9#Kd|wv&nbw%Y&Ht~-p6YuCZ#eewaV|H8M)2L5gG8AqP}u}}wk3>fc-z29Mz zkp{c8E=n;B;e)6{EJMwWAsDc#VDueJ^Q^~_2CZp31F<5%kjuS0+gLkopEId#s^vj} zp|cd}O%R%h0wa(XP!%BzT?j~+;zCE~m7$cR>{%RGJhyl?cUIhVea7*!P2UVssN22841!s1PD$uXv_()`E*V59i zp@{grF}`x^>gug4aq&sL-cK%_J1{wU;M|hpk0BTXi_iNfUhz_I@5sq5Ih#a(r~4Kbr=&XQTcOIReBMWI;k2A!^d? 
zlHkYJzIBQD5)ViLAEJ)S(C^sR)*@`nEvvW|{E4@d&FqHT~SVrX*jHj4wn2RHyX*FUHQ(}Od2wonjKN)Z)cHhz1cYsS7cZZ^0#;P$Pu+3&7VSWPI`9DlvKx*lCSR8@Z9AqT$U9qT3O5R- z*ALE5*zxs?Cga0$JU-HFgkW)3$-c8d*GLupVKoc);l+i?iNS%cj8QeKzBN)^#+tQ4 ztdw4Cm_4Ed8oG)`tBLhOjGHoePw`6RXlmYcthnUg_O zl;V*|9S@LcotauGL{cM5!;4pM?(f?2>??E2%L~ce_{>UqcKb+2M{#ik?>N4kOD>+h ze8+Xgz6~?eFx}skER1J+hf-~(ouApaFmlDtaXZo(dS*BtwbMhh-Mu4SF*DRVy=!{l zvTbA8&g*0GHbd=W3vI`#Y7W9CD@!xerJ?Sez1`Vfm&LFXE_|D@Zf_x0O#z!$Kj|tS z-f_sq&jxp@Zt9j<4uSouZ2GRHAp#Kl)~*cMM4e^Ry~Cv0B{`|mH!BlF(j z^L{-lJ#nISj^&@SYv>1_%5cJE^{EZ0h5&-9kB(;yJ>^%)w|E#eK+ZiPv4CX*FJy`pZfBdGharcM*U{$``(C0tLg51Wp=DQP{?oW-dM-6 zIoH@R%?K)4^aTp6y*4(@F=-`C#K=qqJR2H_agqMEsgmSpH1nz4Km{$M&bg0Z@a)Gu zp077achS0RV9_nDN0rl*IT!u_w^Ym^dxb&&+N)^;jr{JnrIsp#=kgOvD@&OS`C}q~ zZm_b{$QS>sPGQ?3{#gw#9N1V)+IF(I@!&kZSc@%gD>$M?{$~AAnx`^{9)}wF>8X+7 zzTWIy*IZqVJmp3f=-yEItTh-xQ-*y41nNdjO!s-}JfLT1uvlRCl6%z(Idg4|jgAJR zvkq~)o-cPnAX%^45sEbqR!~7YG2L}A4FQX}v<4~5t-1;B*{|UEJ;6N*A)9YD?>y0WaPP06pI38` zFEKeWJlNBnu4F3i&0pFn!^$<@)Z7gz6gZuWb*)2<=5mEKRrVfLhRwuK-9+A`I0*b3 zPD}X)nQaYb@U?;Nfx+OuuX71p7-%t#<=(>l0Ym7~YH!E^e?Wg%1Q+_6b~PsboYQV5w#g)pRXZ>$j{tY54Vhh&mI!!_45W#n zoS|uHU~U1&6PW#H7*~P&5EF0{YsM3CFzZ z425S_5kM?5f{dm@wjeH&ovztIX>`OC7mI=RfA8$({?R2h%GNbJc;%iM7>ldLAS@kP zEQGvybzRHkPP_5iOXQe9o!7(q8Rj>@o#3?*yqRvscSuwKQmUv#EJUswntQ3y$$b(q zdp+3!OU;^-cDgyvgj^5zTAC{k?xAK6V*-?~X#&>o9BgmU&ayS{)ro>x%9%PwWSNDcKL<- zv?ne+An=esNc*>)#Wx`u5P*(CLzU66t%JkflBh>47=YlkLcR!HDOiGn?E}mU4G6d< z`dq^=(8;e76=N9m?5i*b%b?ZiRL%q%U$_WpsLXION=RTbk(~;3XaPEWn4`pc$IzYl zd#oGpepc)lKVP-Y3*Kop>`L(m&KNL zXuHnrxbU$vNa+K}X-J{JsKz6aNGbwuy^eM-y3ACZJXe%Im&~23e|@EnlGX|Yib)h2 z&)Y+-KwxwJ@+}g0)U*g)cEZm^;PF#RKe$;EN@p;f?WFb%@;3{`@Fu@Uz*9c~dlm%Y zX%#t({6SKiByJC-DKZAV?emPfaoG5|2$RVnEFai0{j-2jzPAY>%Afma58 zOaWUI(Vz$}x$ec326cc8@!ltiHQINfdf< zz8Yh-4xQ~`V!TURY?1ujByXfgFgBuOra?ZnLF=(tlJian9tK5$764AmkF5pL=?x0J$w2($U7LUIEo&%D zkJ16SpLDjrsG;!eTWF8(y7(~u8qIeZLJxsWFwmc;eea8MFF=uY4-{$l(#lQC3-=~# zJ{w?>&o&fS>OKV~xj^_?_6fBv40mD4kJMF$*C9yJGSLn=^J;;hQKiGh>e0nK19cCA ziR!dRcc0*W_$`VLG{W#`frS>(3F_*>S425?Khy>nM0v3XgigjyI_gW5cZ+=K#yXTr zZWp#%#iI=nf2tMb9u@1ZC=V(r4<4lV^1QQWH6JI2``&n@(0a!Tqxz1bfT_?|Xl-NF zP={Ir&PR%>RoFwS0phkOHDzXA%W8BeF!etH+yZT0ZMp_Ej;F>{Z#q^VVnel|=}4WY zBNb`2roKS2h$hej)!67rvCx(A)u7KoHTe)!lMl1~!3#{(ct-+wqG`Wyz(%CP6`8zx zT@_hJ^?IGf*|ZnGFRaktY1)r(ld$?tCOyN{`URcy31TZns@=&X#3EVMdc4CRT)oyf5obma?0{rXa?`t^pw zz6y3X=58w$>>pFq_? 
zX+Ffb5bt+CW}N27jPogIj2NeNhj{N>Y8nLgre3VM{v2x9l|fh!W}LrV)Ud;aYR0YMbv(DPX7Ge-DSi+8tB!y9LUyGi?!oc6 z(g$!KI#`EK2sP}zYpG!mEI{WsAU|-P=Br_!ZdpK1YsNK-p{K0zr%=JJvGQ~6m#AQu zDjHR=tCKArc#(O&((m;bnK#OLNNcw7Kj24DKl;%xS+gB}nrpVh{+jKltKu3Q#>J)u z*A@H$(t&mL)<35m`i0w8>#D7JcXAuxCv*>;#J5ztL$*Mkg5_;bhU)9BFM$)gfE{}P zn@*VAt=b_!n89<{WYUzC5Wkr1f*dA{C7}{$huJN1a++!+Q_!>>W^i-e0H_DYj31rz zK(I1d`UnX$!Rpu{=02is@uFZ@R6!b*y?C8keKnE1HZ|pCbX4sY0WtsnuCe5G4-Q{9 zNoEj_D2W`_UK#a+^{SM;u&F{?uAOtA5I$houLyWL@BX-cwV_6l z(AE=REO6uOZZ#k3chmG5=f-zvE%@WrbSyGfhD>Ql?Tu7o-dgbO!@veWUbh`#rC#OU4@9DZBa9H5HWH-=?%?iZ#M!a0;3)q8g!#=zbH1z@dl za)LlrjCIw0AkFn`cxw7ukI?tTePVyBF_GA*UurQFbSx|~78pg*9FA6V9WjI^#s&v+ zS-@?H&2`L?pF|x?TDf~hNFS6*A9RvFNJA_b5TDgCQN|&pjDuh}1&b|$>7ZOn^zCS% zFQ7MJFEk;mh2ihIy2#L6_7v5)6^Tq8eDdKjDN__1BHLce62h@W8amz?Z)-6`k8g7Z z(lAQ#juUuat5FiQK$zb}5^JjcpHv{I*)>iU;?r}J5q&f${|}NP0-|!?=a2eNRQ^9# zOhGMs1!~#(nYQKor%!+Xv>;gk*!=tN2d)X$r^MESJI9~4z|AVUCKXH*2y)WwMIT27 zE!80ZYO^p&BN-1R2HY|6-=t)%{_&x_|w(`w1NX*eSR%UU)I* zNqK}k=MHLLb)nC+s@+%W=DF{Lg@z%hH8`QvO8q~LkS$%tE&GqfU!eOeTqXKC%YmDV;@-FKGjf;=XA&# zcdzHqEA|v>+_O(-jXRk@_{BWf@VvxWdT8SG6(^l^He>V4OCOZEj`kTSLgp z4-!JJZ&)*4fC6rV^a>A|(XI?Cckg!_8jy5aQVYo$%kfjJdB5S{9rvcdn{k>3wfa`c zB$v;BHGTebjcfJQ0`$6vUuUJdw@&@8>5UyXvFEz_!3S%WcrIdJexCd_N_N(F!gDb+ zKxS3S=Ne`Zoc>1=Hlr!*JZ3Wviq^E#5Cy*1dc196|7p<7WAmBZ=40KQ@!EX!&3-Jc zx?Ab%d2F|<>1!(X8RQjxr$t`TAg??3()<~e=C4sXyKGCE@tSl$!(Z!ic1@JCYiliM z*Va_d4kU=T*Pg(qcM_ZNH0SeLsljgiDe@Wd*smSviI%nOWu@92@lA+_FHB1E*Q}f! zxv6NP7+ppyjQ_B5c5RL2?D(6^J)UvMCx5XpI94T`vm7Agj)GW*H9}zN>kS{FZ)7k7^IQ+^=EjV{DZBBV+*aV|MQmQ>@1dik&ZzJ>c<(l`S$l^C{S;-P3$Eu z8(1%-^rqlKY9VZioCnDXChLX6{<`iuNwO>ItS(sU01>fKnbO_5?3ZN7cDW!82s{S# zB7sciRM_H|Hf?j9U#xv9Z22NIV1{fr!5B=g42>tJU}$W5zviXfnc{geF#lh5*B&ER zRmRV`ckaEjv$M1F*m>=Hc6N96-EDWbyWN)U(C)U-t!t?e+5TTJKHJT99 zL?Q+WG)7}$FbdJcDkwk)+V6=R2=^?rUbYYJveeWO}}H z=YHQg-}%n>_^Ml`?@hU6w1S(bIg5Ycx;`uxZ( zqOe(2ubc>|z#3viQ4LY=rd0#2W> z=9w+bX2Zl5qDA)b;Vxkfl#Ph_Cj#Wd+FP&si%98BuA?cIPdB+~bhx7}8F%N?Z6Gf- zo+Za%hUfv75T{%YWJPye$z+LX_Vx%QCk)UTR4*8TP#`GdIhtF!nGaI@bGeEGBD0obTA zPGHJA;r6--9k)cY+AXnO8Jw3F$+ddLpx!mLgY`=T6IrmHu=D67?Fl=N=0}J7dowMK z5qaw#8Gw@4xQ6v+D92ADS}jQ3`)cMLLNTg(wz!82?~vUV5rbcoW1jqnO+VV8L-XCrwe#PuD3#nHZx_Bgs`Q_h9!)Ln!Mu-gZE{GSX^Wb zqOm0K^TBPXmHZFlwJY=;ATSfgDrOn^xUVONk!*umHPllx#{5QPxpEfx6BfKS+lyZuo$ zO!9AO9Wzx2)sPKUg+Cl$3@EAAQ(0?Pq~0Et_*nCDAzvt^3v#KhINrl#G(!1P})c<{$lXv^*FqF z2A2JL;npkr_4=V=xW@@~Fam%Vylbx{aJ22EB=8CPV3Un^0cZlH+C(bcg@=ESa^89I(RYe~IpA4SG2i1aa_*&#apcdZM=%2aOYjfBeCw<0;-)wIWa^GAYT^6_% znRGJV6bMm zrgQX)fGOPI6<%f^<_@`?HhZb#?aCP+mU4^=lbq%K=*@DBY07*KYV%}O=!@ERPzK>ffm+@njw{a&63(w;NhvqA% z57|7pKyfI3Rt1M*qk<5jX8d~J-n8F-$%RCr07@wNXpY*z|2`7Mb0TN=VTBJeJ@*00 z42<5mx&z;P^*rH4@6+(|nl%i4Mf}vP4mV8Yak9|xIG>p3U1)cZ_0@#(xCeZ3xoo<{ zzJE*J4&*bbSctj8po|1yJq+xCcfeJ|bM znxZsDh|z#UPhjz*>a-@;jILx~+CmCXU|iI3i@df!wz`TN9<02A%MV_5+}@ijZ)+DT zr||*m2iYa~Z&LoJ(D^#@UaT5tX--~<4au%zy49pE5j z6qsusFrRNXIGQG|`R1Bm<1}^WnwM&~nA<7Y9jnn}P8GzvwYtpdblu)h*KI2r!)xGw z8XFxM?t-$NT%5088|Abg;@%Y@|GhMYt zWuC8A4*8 z6iVBcYdR{X?L@9gDVOib)+DMJE<5{^0JgZPO;dfi2OH%a=);}Y&rFYv6brFE&3jyX zZPx0iHAY;hvbF1nA>cwqf;4=X6{v722))i8!i*YJNEDhZC2T$aM@ z0c3KAc)o1ole7A@mubwofR`YLpwQjX2I(|ORdc04RphZ20`w2~!GIM);dxm)sfiV< z%fb$C%cP5r2dHGSfgi0nqBgl`?VL$+34slP+`CH1VryNLzFP^dwv5^o)FzRmsl)jO z$*cB$MAi1ApLnQ`+=4SHyP1BODE?K7>cCjBf3{_HA<}hY@4?$v9Y@gZ#g15{@_Brz zy(N9q-pY5l?8u40?kMwozvN?i$c6HpJEOT&vN^UNrL?-F6&nbA&NYbrg=M9RB?~g664QT{LVDqq z_rCf>@5;()hj=djuJY=I;@WH7$L;>ANMfy0%@n zlE^qr=*>H|2dlYIP3C99`$O?1)F&Kigb}Qlg4=LXlSgz5I^42Y-zRq@cuF-YmV45qEpWJh;xki37 z)Rk*gH{m*Rja=3cR6cQQ$dySG)1H-#UA9_^9n%W~%`M~mXOqJtrC8^I@y>x<#7y;e 
z%q)~*snI=~i~CD`H;i`lrY)!9ZcIeOx{>RjEOc%i>oBZ{`RT?~G?iL{vsf$Nrpd2b#1cmx+ag-!#p=(GkJ z`n-VpPloK+iEB8ow6xLoY!Yd*x=8F8KDhNY-YG9hd&W+>#)dF$+=eyQ^FFWj5%~EK%H6G0b@H^n~scU^${9JLlN$ljlQ&D z%t3=_;wzwtcu=3Nvk65tizFw9t;KqaDAVwV!v}3AL+Hc&!FD`6Y=n$3$J2Grg#xB( z2*R;ttDhX+#ga+$HpqoM;u{n{>9kONN?2|N5#t9dQ7U3gvG<5r|U6HSdxz{US9)0(f>jICz_qim=s$Z5o(t}@hGx_r`+69X9aH0&i zjA)q0*krflyln_jV-zC6vUSyxEawNtDA}>1i97S03q6jc9_JLinty`4imn81?mTk8 z2;D&4x(o~npIjrpgc?yEeNfIsqX8$^sr9@an)L4oe1;Ahc55dHy$U7AmnDn&nI0Eqp ze|YCBPxKx+a{3GM%)^)19R^=Id-h6FIqvNGZpzODI$TG;Y;S3Fz1^_=n_@duk27A_%2g8>Ctj z@|%dB?ScyN+SX~C0&E98WN|pk>XfG4c!)2abTM6;n36!e#i=4LN)X=T$Isp>ad=PP zx-1}g@44l(=gtcV-d_VAXXPWR7hBLT%8`~-tO?TKLlVy-N)AFqdr5-zf-${egkeb} z84}N;&nS8aVIjt(-2`q(9R=3G2ptMpKbp+H#k}X_I329U|ol5SAyxf2Z@A zkS79(D7aktCywFXBk=t;+#5Q!KKjtH56^{WIQ<($ze1i$6WUjfkWo~hQfM9Ebej9& zn2yny#v>L|r}*BXgEVO!c@6;uOYO4rC?^!JYyEhU+T*v;-#{%nU95>_<>?m6YTP_l zzixo9%lRPr>Z0mp8ZVB|#I=j(WL>EKI^o$ybd=nXlSdBUcFW?8<+<^(!BRd;Y)YB0 zbZ}VHwdIA86gfZVldwwC(6AJz9KAjz`q?HR0~4;sO#6TXoEW?^ z-<~4A@NpqA&JcMK-gMnjdn{s@S*<$gc>&`qXy5|`zY!7~`FMK0=0r#b8r@67by1%i zHK_zSPb&_@q{~N^onT2i1=#vG6D?c^0001Z+J%wNYTG~%#y{Ck9Alam3L%uX%qhgw z5^4f5RP#6sHN%zXRJ z>>9v{a|{Rd?<2o8J9xoS%`J2>vE0TRgqAxv#V5-Lc!w{R4{?I;mb;w)W%&rt@yBux zr%uOmAJ3fE?Cf-U9DQ~MmK~7u&2kIJ&QHs2d~kkS?!a&LEgxXk3M?OD*!pU@%lRLc zkMOqr(sB=e`@Q8pUbUm^VzpIrz9{KD9#FWDN|)P}7}8B1dvxLZmo9%s9*xtKj8Kyj zTBwakyzxpr&|Q>knutWMv#FS`(@0%;zV8Rswjd0HN>pjwTGQM^cGFBKEsLDY=Bn|C z`)s5Z3Ei$l9`aC}NJXT@&apOdt-&r!Qd2~wiV~4UYDvX?^H^RZju%3gLeUvbWxS+W zl*(A-r7U+cFD^1%qreJVC`in)KncP*Vhnf>{+QMrY5mQhAH!$Ddnod74PuP6Fhbcc*2ms zJExtG`;X64eW=lCW~S6!Em%!^o$0Z2eVz?#v~eQbnl3rdQ{PmrNcoM7ra9yO1a4!3 zrj|?};)wV*6|V``GoxK{uE711|CaYeJ?5$~wd(YQGjlJJ^Hql#_exDvov~yp|8uK6 z3S@r)h@ji_0001Z+HF>6VBAC&{yxRtwY}?>-b3$%5L)P%*d$JtI3zJ4p#*t%yenHP zInv6GP4B(;-r<0w6M8>-=jcc8yd}Bk495tJ#3+o$7>va@jK>5_#3W3{6s(1{u@2V7 zdRQMDU_)$#jj;(f#b($XQ?Ui6VN2icR@fTbU|Vd58Q2~>U`OnPov{mc#cpWA?wE;Y zv|ta+!fdo+4(1|*Juwe$n2)`%01L4Q?bsWOu@9DDDfY#F*dGUA84iSw$8jy5z>PQ( zkKrvmiHC42?!w);6Dx5XZpR&X8zB!+Z2t18v@e-cL3wRNocoi?>72JY6&cGXZ4X>ks z6VL?%-6+C@h09@MIeOs0g@+P)(T5cnKtB$`AvhTK;0hduLvc8czzw3}4_&e1#wJ4Zg$I_!i$Y z!6d8jJF8hkFvT=$S;u-du#v+#0zY#kM{zXAa4g4hJST7>Cvh^Ta4oLQb?`Ah;ksOp z>vIEc$c?x$H{qt-48P!4ZqBLPg44Jqr*kWA&26|Xx5E#d!R@&NcjQjonY(aT?#3qW z&Y5gx3-{nG&Sop;a4s|4lk?cd`P_>OxR8t3&b_&q`)~=Da$oMp{doYF@j&YAV3s)w zb~4Wb4R*1cCX2Lavxm#+&}E4pd)dbo?B@Uv;=w$Ghw?BU&LemvkK)lhhR58_SMW++g`apeui>@4 zj(_6yyn#3J&%B8@^A_I9+ju+g;GJB_yLdP6;k~?%_wxZh$cOkaAK{~XjDO+de1cE% zDL&0-_$;5}^L&9X@+H2^SNK=H%GdZh-{6~ki*NI9e24GyJ-*Km_#r>y$NYq!@-u$U zFZdcr zmei`WYOO{~X=$xitJCVW2CY#Wu8q(}YNNE#+8AxDHclI_P0%K4leEd<fX_I<{5U zBOJrdWrbA|j>uIu3$vtGw0Mr4)TKs3?{Gw~Na}XpwnTR-n>C!QSL`&!ikfWBF6r4| zb0U}31LbCaiylY;Bt=9aLW&wOsGby(Eg`~fsk}m(AJj_cvv#qlOCeQ=bt(!Sx|1+U zhM5ydBQj!0KMl#Owa(Fuu2fgNDczlSgs@EA${E>&sb{^CNSLAh3e&flu;i2#P7Q0Z z@<}_QwnOS#yWm-Q-SLX1?v)abRCnaT-B3!ovAsk|a+d;MJ?X0_2`fFv@aerMq%nsm2N=kF{P3@0awfjGQW@DI1#)14rX;Z&_*^$K6ih0dpP zw%1{byrcVx=v?Mgn!nsMB+X9}m}XBi%3!;kZT* zav-No*YpD>$5yOTE;kZUVkg9XGY3hzYN(Olja6p742|v#Pl==dxDok^* z+-ZBEdZ*DFuDiyHVBPgo9S~LtBVP!4LPuBgWh7B%HBlxa%0xsNNt9Vllvz!biHI`N zq0Ic$KzO@l%=+VW)b$UIxUFQ$Rx)LmXUZD=^$XeSHv$jMuvlNoqxL5kvT>>#vSsP@8|2{4tdAR{ zT+1o9R>b49P|XU(tWeB0fP*aU+@08mMW!tNRE*jnb_-jiWsxOA(i=q0` zRql(aTz@k@TBBL+N6Jz(EhPxHYS@kq4c6M>@2%l~+EZiMSLSF#e|c~R;$zhhl-()C(ezwZ$f&|B zRal@3^HgD>DlAe3OBK3RLCtX5Q3W;NX*Ht5gq2Uo5Y-NePQ6ifjl0*#L-1CUYgYMM zqfodx8g5DAxtOG8Sw7RvAkv z`8Es34LzQfEqVTH!onucL>w&ONIAw zp=tN(-Sv6l6#b`bhZ!{?Lq5I!07-;K;Q#=5+9iq4E5iXChMz}dZRNU{9Gu<7{5Vi+ zBjrGni=33S!k@6xT8nmBEs_?kXo~-54#H`d{Q=*7)_Qt-d!HA9Gz%=@#p(4WDJH{^ 
zB#a)<%2(xZj8}Z)PDRp8G0hAU%(6&6{CP0PO^~C;3-k2&W|^O0gH!gvc--MKCiw+QX*7-C#_Uo*4syJVQgl?NSP@UD%arYu{V9Vbx}Qm(XqTW5F{rK9hF^4e z$EW^hg7N^vGaqhv+I^5q3&JoEgwOF;EIEiC>?0QiW2N{w^`zB%Xx17`+mHmQ|6cKl z7QF4Uv-1tZ=WV6ojMk~9p3yjnC}3SQQq|>*?(+3$Lgz`3SLlTlUVNl1h-uOzgz%y< z7puynzpsRQY@Z`pyiPDEhOk#!ixMHf7=*s4%oLXBR9QooEdofjhP&8-MTho`GI%(K zZ%ZD2)3~=nVkPXU3r`KUpd1Kzi=a9DPtoOl1Gq$2SY`aJgD#OpF^d=*r)jp$SDUWc O&o>WWy(U*!r*E{-zpeKG literal 0 HcmV?d00001 diff --git a/docs/_themes/ceph/static/nature.css_t b/docs/_themes/ceph/static/nature.css_t new file mode 100644 index 0000000000..394a6339f1 --- /dev/null +++ b/docs/_themes/ceph/static/nature.css_t @@ -0,0 +1,325 @@ +/* + * nature.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- nature theme. + * + * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +@font-face { + font-family: 'ApexSansMedium'; + src: url('font/ApexSans-Medium.eot'); + src: url('font/ApexSans-Medium.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Medium.woff') format('woff'), + url('font/ApexSans-Medium.ttf') format('truetype'), + url('font/ApexSans-Medium.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +@font-face { + font-family: 'ApexSansBook'; + src: url('font/ApexSans-Book.eot'); + src: url('font/ApexSans-Book.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Book.woff') format('woff'), + url('font/ApexSans-Book.ttf') format('truetype'), + url('font/ApexSans-Book.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +body { + font: 14px/1.4 Helvetica, Arial, sans-serif; + background-color: #E6E8E8; + color: #37424A; + margin: 0; + padding: 0; + border-top: 5px solid #F05C56; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 330px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #ffffff; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; +} + +div.footer { + color: #222B31; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444; + text-decoration: underline; +} + +div.related { + background-color: #80D2DC; + line-height: 32px; + color: #37424A; + // text-shadow: 0px 1px 0 #444; + font-size: 100%; + border-top: #9C4850 5px solid; +} + +div.related a { + color: #37424A; + text-decoration: none; +} + +div.related a:hover { + color: #fff; + // text-decoration: underline; +} + +div.sphinxsidebar { + // font-size: 100%; + line-height: 1.5em; + width: 330px; +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; + background-color: #efefef; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: ApexSansMedium; + color: #e6e8e8; + font-size: 1.2em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + background-color: #5e6a71; + // text-shadow: 1px 1px 0 white; + text-transform: uppercase; +} + +div.sphinxsidebar h4{ + font-size: 1.1em; +} + +div.sphinxsidebar h3 a { + color: #e6e8e8; +} + + +div.sphinxsidebar p { + color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 5px 10px 20px; + padding: 0; + color: #000; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar 
input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #F05C56; + text-decoration: none; +} + +a:hover { + color: #F05C56; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + // font-family: ApexSansMedium; + // background-color: #80D2DC; + // font-weight: normal; + // color: #37424a; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 0px; + // text-shadow: 0px 1px 0 white; + text-transform: uppercase; +} + +div.body h1 { font: 20px/2.0 ApexSansBook; color: #37424A; border-top: 20px solid white; margin-top: 0; } +div.body h2 { font: 18px/1.8 ApexSansMedium; background-color: #5E6A71; color: #E6E8E8; padding: 5px 10px; } +div.body h3 { font: 16px/1.6 ApexSansMedium; color: #37424A; } +div.body h4 { font: 14px/1.4 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h5 { font: 12px/1.2 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h6 { font-size: 100%; color: #37424A; } + +// div.body h2 { font-size: 150%; background-color: #E6E8E8; color: #37424A; } +// div.body h3 { font-size: 120%; background-color: #E6E8E8; color: #37424A; } +// div.body h4 { font-size: 110%; background-color: #E6E8E8; color: #37424A; } +// div.body h5 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } +// div.body h6 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #e6e8e8; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #efefef; +} + +div.warning { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: White; + color: #222; + line-height: 1.2em; + border: 1px solid #5e6a71; + font-size: 1.1em; + margin: 1.5em; + -webkit-box-shadow: 1px 1px 1px #e6e8e8; + -moz-box-shadow: 1px 1px 1px #e6e8e8; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ + font-size: 15px; + font-family: monospace; +} + +.viewcode-back { + font-family: Arial, sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} + +table.docutils { + margin: 1.5em; +} + +div.sidebar { + border: 1px solid #5E6A71; + background-color: #E6E8E8; +} + +div.admonition.tip { + background-color: #80D2DC; + border: 1px solid #55AEBA; +} + +div.admonition.important { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +div.tip tt.literal { + background-color: #55aeba; + color: #fff; +} + +div.important tt.literal { + background-color: #9C4850; + color: #fff; +} + +h2 .literal { + color: #fff; + background-color: #37424a; +} + +dl.glossary dt { + font-size: 1.0em; + padding-top:20px; + +} \ No newline at end of file diff --git a/docs/_themes/ceph/theme.conf b/docs/_themes/ceph/theme.conf new file mode 100644 index 0000000000..1cc4004464 --- /dev/null +++ b/docs/_themes/ceph/theme.conf @@ -0,0 +1,4 @@ 
+[theme] +inherit = basic +stylesheet = nature.css +pygments_style = tango diff --git a/docs/cephlab.png b/docs/cephlab.png new file mode 100644 index 0000000000000000000000000000000000000000..4cdcea286d120cd414a18f86b73622f113362eac GIT binary patch literal 36032 zcmb@ucRbbq`#=7ahBP=R4Wq1vc^vDIti!SQ&d548$2qo8Sx1A=P?U^BNRpC_LPm*X zhDa3I%F6s*&+7GgkMHO6-|u#-h~s%4&&Tt6T-W`&-|v@LeO=-%#siE9g6z^zCm13K zJrP0Zv>3L+S2#x&TM>j|)K}Hq*E5jh>fwa&<5d29#xE}B;!XAC#}W9&#qG#s5l2@$ zXNsLCRm9857rq4F_w;sjCAm5|{&|hKn7Ej*n2fNvj4@V*AEzXShkqobL?mVKmVaJv z=j`P5_kxllVsL?@=2+~pKi_lmwR8UaUIrAyKoxJCl7Wn~x3Gy=pst$6pV#<1QK+uo zUVmPO#fiYR(Qn{XVbOnA2N!kyyB-TQH7!*yOAEaq9Vvp8v#~DKNaxRWyzD%k{#-;K zmL!b*a}jA%Uq?qg)mv3x3`=qk()0I(C93IqdK%#z^?mLA?cFuZa3rd#iLa__keG^& zIc$NNKUq!H)Y&LV+tSBK%-)e-T-lWvNK_)K=;0haRW$;M(tg+=c%6x|vZ|Mur-p-p zTVMdi9BxRBzRRc1` zOv_D~pktz@VIJ)6?hp{{NeMDg@vu}g^T(5M&hT0f9b>A7ik$?}U5~0Fi8pj5YODBp zsrrghB<%y0d^P+%+{FVmaUSl%x91K#=gT(3B>b zo2v#nsClV+c(^%W-5n_E0b*qIcKo!AG=kxylBI@+rMfY}N5WOw*}xF5A+0X%3QN@T zG1Un06|<1=F(Rw`I_nWsEPQ>jDuzz*Jyi{7tT(#DWcbI=-O^YYJ{n0%IQr|t6^*rA z?WEx&+277o2XCt6Y((^O3xa3psuiR`P*-x4G8ZFz8|zB=m<0v9zzbCauu@8R0}V^@ z02g~DWp_76BR3Z!Mb+QK$Vh}d@N>ciQ0>hu+$2fbcoP*#Q!^)fH7edi z%1GLRA}ML9Zx>|j?PBEaL6pQPk<4^7rIbvuO1?f$PU2z#Dk^S7F9}m$yt*cqg4K7_ zwevNSQup^YCEHt?NZFe>x@r0P_(E9V?Qvo%DqgOhs%lbd?l^x(Hz`kDJ*>BvtBkUR zg^MIb&r2y-+(VCIpss;4m)4{BQzZkXb;R_89mx*rx=!Ze(t3_EVsL?Y$+XU7hq~RMkwxNGeX6 z`r7^`(k6zMI5!X2TYqmCS93cxQ%6ZxZ}be1q{NKLM&tl*f{F>nOi$lKNlTY(>SJUo z;es>9IYW49_}H7`%!y>YxRk%CWFSdRMU_mnr>f$;e2kndsWLj2! z0~LEid@#w=-qK$iOK`@kn@f9S`}7rmd=}Vec5Iq~fLStW9)sb_<|rSXfAC zIcvK+NU1pM273}colTTss7IjU4+BWVwHD#gsyjcRHk?chVg`9dOw1?ovE8TgP@ z{Z%zQjg<)IMuE~k{^*9N8R49a9lfdgUcMS;_BdT18IqowBo)3xb|Cvo8%t_BqyNzk zG&6J6(1y=k989D=ygZE^gN=-}u|Ym0LvoP1g^Gv0tBII1!PGB6h2l*xBIEs~?UaK2 zgPq_cDvRk7jC}EdQWPaAcesR`nYNd>m^Q%(FXa+M2zHQA!WmJ0%!tmK-dH69?4z1Y zu!^1sEKgb8!$=9I51(iRxJf#Z^>D^68s;8K@Dh7-C3_P;FFhwoA3t?zC%n6wyPgTz z627JEZE0jc74wqxag~-*^YbB^=;}IR9qfz@|H%yn}rM%?um^gM+04#mHuMruLFhC!K>G z_00pNaAM->#;#7T-Xs%CZ?gb1oSw3(BQeNK#n0Eph>FA7tBR>xYG|99!agdC+pDPH zTmv=Li8wTV#HD43RCB6>yCE(R>t*LogqKKLIEa~Q1Y<3vr6k>5wJDlLgkV2y4K;1; zU_DC@ym^qfgQl*J9$qOR*ic~xaP=J>0eRyrspRNfHDPlBKRO34X-M!N5>C zSjtb`+(FGy##aZ8EciopclHk!7xR#?^tB*6k#Wj?I2SW7_=c*gvjZ$#&BEMY!p}li z+mDDogkU|qw3n%+xi3MV;^K>DUS%IYoS3p!pgG=1+?3!%#o9@^Ione`#f}? 
zaUUgracx&G%Ronhvz>>pw77*j!QQ~ajHIpYCh0-Z3iQ^N)K{m9S(v+`MHTNV?&E>S z>-gc&RVx{wH68t<0{{C55BvrP@c+M{2B(@ln5&H-hY$^dl5t?_#HSD|W54BXGe_7A zFnf#@CG_iG{fO4QD|E~8IE7j&#LRHvt7UXo+OZP<^dE^|Dw^CRMf1zDo$f`Y`2V_> zx7yw*>Fx6H>fP*&t;Ykt{akSvZg_c?ipPyKHT?{$_;wFR{nj-2D{Mq=HdpTVH-b;(Ll(+EPfgv?A%tFb-Tdc`lOxZZ zIdkWrgvMwGLY5x_W z)BDu(U|(OKl=`R6f4=QI*)uPc=HO`4RcIR%AK$V+xA)ect?Ar+&+w+G-;dKwY>cnN z?|HfZS&DgER`bD$O|F`uxIbTT)E+AzOkbG#v?Dw`JTf|3v*MTP1wPE&s8^K;K}i1bo#A_U#9J5ZM`KMFRx69XGsur zO*~S!uUI-ROCxd($mfQ|EA`syLWa+f9u!ucol>M@$g~`s&E?F)> zr01D;{P(u|^vH#a7qMZhfqX}f@QRDGgs=1t5H1p}Ytj4KUm%UC(7W~V?n4&u9S)Z) zUMTX6nT@Rb@*EIqecF(aqoJszL`P#dDCL155ESW7Z?7-SciGY*r=EH!sj6;4KG$77 zI5R)L$L2-YTJYv=+cLAWw<3?+I*z0Fan_&mgz{B0-=bJ8@YAb^Z9eO(LCCGmZwCgr zfBpJ}oQ10$787H>cI}#eUI*>Mh4WQaRWm(Tud+4WlH7slTUs(g+zo&GcHsc)*(Bb& zeYn>t1c@k#VkHQtTFGb{C3ch#gV!h*rW|r((NE#{^ddknPzbL0_PQ`Vl6o@q!1nFi zX_qcVYTi2^5kc=(SS;Z?sxM?+W*lhk=Ls ze0V*Ec3r&!a|ugwUS=_PyRxyENKxSvfR*X%BS(%LDX6cmuAcdzC7KR<)ZgFV{n+W^ zOlndRDq=K4R7SvLKDYz`#IVTbp2S&!eoYEao?6%$oV0*#|uZOkcGL(y=c4 zF_Jd|PwjJPx+AYisA6r!8G6$;6LdEDzT?=kdGqOykDW?=zq8O7q`mr(yZFrygMNNf zrbu{nixpVhc@Z%E<=H?O{auYX*vE9!yxDdmJ-eyC=hYPX;JKG^oN|#XylR)26EpAL zJtSoDm=LJj)zsReupxB9*2E-AE?|nbH{SAhz)8OOx?VLI+o1Dim)#|-z|5fxPlAN$ z*sdZmgQTMqk*@(JP3z;p7>Z?jVFHn`jzbCqok|58~Gf=9fn}GH?}U%k7L-_ zBFDzuke71PSus#b9u~n!}f;2 zjmMlB0(2!Y9R(J}TQiCAYH24!pFHT8?0e2y5j?MKWyO)Lm$sWoBu2)>4E0xpKrEY= zk`MFn^3FElS`Nv}b9Que$OO(D+O%oY=fS~quP?GYbUn15`SC+}aq3f%|DF+44oBME`Mu9uo!7RD`ySBa)&Ti)_(96yi78Efm~(@*LqxBjHL{#Yu}dLqj&Nb-zIu`pC7{-$jdLU zFAl<^-E~k3LB739iFxYTYd=sKS{kw_l97?2NGPR_>^>Xa*xHe2hIMN{x_Qf%!+8Au z@Lz9jpCo3Mm!BZ0sqsik?s2HQq~zr#f}CDo{pB)HDW9!<^Ze3MVCnPcgWvC`Nx5^N z+n>x?rbTG}^S~~W4`o!r<5tV5O7iN=IwU7|u)wmU8X|=b8SgBpafp`}uyT>);ZatP zL(tl=vM@=HT)lQp@+OC>s;V8t4supueTg2@4{KuPg$s;F*%;5vzGg?xCLPhsFuJdc z-U9MYC@DUk3Gx(qq?uFumPF$v19S&T;ii~Y#a!w0kp5JoSX<2uy(8(yxOC0l=4&YV z`%A(0M6KMrckd9->Rg*1a!D`l&6_tf4ZQK6;dEBJk2iBn4pea$oO%{*S+Muc;{5#U z`)Of}ib)(g$zn>Y3Z{*!jNdIza z_^5&cHzD#OyC`E}K}d5e6s5t5iA%M$gu%Vc%x85{q$JHdS$@YZJaf{KTN)K~OtTD_ z+=D^Qu++7inIVf)uJ1Tr*VcZnVNzhbGu>bDLJ3DC#&KY5P6d1$?e(sTf;7_k^y>BN z8Rg|1$Ppr3M`W6{b%v0Tb}+>@3gWM(A?Y{?mN*v`vr>qT6SDfNB^j7>chQF$KXlE z#l=Ae$&LfF_Q^_cqpNCt&-;hgFJ3?bT&!G~lDoL;Gu+FLhgQ$e zr-dzV4+;uG$Xj0isOJjtIEMLk)3uvR=i6FQllf&%c(5LxGw-Aa4jnp#pz(hvbiv{h z2i`ExlJhS9ga~{Upi8ns_({lEVo*CcWPKU1SnMGl0+BdW9kqjji75sOF$)U|f>dp+ z%c;e4UAC)K+0lGgOs{}RxY#XtTh`C*#97h2ygXs?)400i3Ym}vAvQ+UqL`R^cKi5G zSHac5t+g4u>k!68Ql33oBBG*cuF*bc_F#0R zYl1JAB+;Olc>sD<~7_cnaIhndDOAXtb-E zPL=JKPcxw(xAdf9>-YwVgqqh@b_NEmA%;wS@Kf<{tNxd1<+U7bPKs$Y-)Xdl-rkj+ z8)D8jP#q!w9J}J4;T2lr~ zy8Ava4^KeA;>2X{Gp$cMr99#E1B$2#{&Ni;8f|xMi8wcR+C)XjqNcnT?2+{g^5A*d z6DQ7GH+PW3a0+A`=6Tq{97~vEqQ5>pJ~9$uF(&oMDRg=67+*$xQ+q#@>s)|`A|J|D zuW5eu%-!Y*H4-`7qI#e=mX^)Z`hw@7p6zaevBT)a@`?(h`Z&&0Jbv`u896yQawkuo z9BO(GZ2;+1neX?QtA5a$|%;_a`Ix$o*-Ono|#&fZ5+qsV%3hhH_ z*jicp>ja0oHASvh|{n(MuRyHTWq_*ndTO%{*I%s}TUAz8-k>$y&QD5@Y5V@6x2=s@Y?xb4 zt}44V9Qv}yOY@$%ZLYD+%-md``kAAg4o!7BygaR8beqWiR<<*FW`)tk#W+Zu5f1fn zgJT_e-+~1-tSf@|K(ctHC0Y@=SJ?7%#bWKT<@#BN&a*4IODwFc8;3Gt?Jxh_>)1Z% zYx*XE_Ox%~21HvVLGG0l9v@WxeD|rq^?=Q86yl?HuwSUOasTlx#hXW7{!>mDWmWqkck#}t``;$-!)Xk;YVIKKN}_TQ ztkOhUm|LZARWYeolfE%OrQ}LSLWQ;vWSQkT2leInbN|U(Ikuy)7f*X_6UX1ac~gB$ z(tT&}uKH;@hcER)5i8&6NfY;7?Jv(0d?RU_2v)YC3%)X(38c;(FG_7Onml*hIrF12 zt#(yFR8*7*EljP|y$&jcqU`5aV)K@=dO(~ntVz5yP0BGl?AEa_g9aDg z>r9kdm6Gj>Xbz!FJj;c&ao)#OHQ#-UqCd*R`EZ>i*q}dXU-+Lt^=_w6Bb!hDv^qf= z9<@A|?)@FV%b8a2;?&y3)G%rkRrkhxSm((CtDbjuQnh)27X-`8%h$VFTZ_&=S=m3}7)bT?6`KBV?=6#v zh{#)7Lvc~G)3-L&;rZBQCy|9^kElwY!KLqYJ_+nzOrM&VZ)g|Qhp^i8rqUkN_Anp3 
zqQpdsKdymGsZG{^>)K4ePgnT}nZsxHi)vp*h}i@->gKE4aslMQ>Mi*;6$c(ZeCT9d z<{M-(R#~h4vSUn0WuTP2>N`~0u;W>b7m22o-Jr6*mDT3OMOwydxW0j0NNurNlOyyH zw{Y&A`SwNIFr*+T{i0!CVvmqCKiyAGc)4Z2kn+D@#=c%vtz&G7Ec;1Z9hXuEKhI}l zW8)3|;R`}LU8IIv&lA^Yfm<6XrIxq%{MXhUZrdHfx`SJ{ONPkPqfQX!>)%cKR1RE^ zrYh%&WT7XXy-zRE`tD7gUQlco)p}^^y#K$RvVHubqg9D)?(X~J|G{DLMI=2en9Mdl z{xf7@BIwnpPx2|WQuJ8lUPX=*;qpmH+)Na4J^fueqjonVTloe21qhP*+K|SyaX*n*&-hT&IYm=b^L>Wu4v}tqPuNVSe?P&$k5*8Fbe839wwSj0 z@qw!F^ll|brn&EJBaE2&r{oupvo$^mC+|-<(ah--jj%gDVhA0oo^884#3S0rRTj(1 zzCEu)gCcV~FK_%CP9mj2)YBXsPF4)v4J zsImiTaEPbxIh8mf4ynBE>M^4yTyNjKQ~lePCOy>~DDBo+?4;K9JX}3Uu|Nr~0}U#|x`AA;6nV^57q|TwVH98G6t* z?AN_c2S_DLP>e72?8U#1YRr_sq!Bl^W7n?N?u8XwnV6W^_Uzdn|CS%0c-QhaUrbxa zVYk&}uUa{G3a{S{c%-XA#SAHVrqBGwO)qiE?PV3vewkyCIWrWmkdc+O8J;qN0cgD| zaCNpm-T3(Iboj=>{rmUN%+9_7@HGom6M~SMQ?~=FXcr?OEW8U?ts1xhgs;B2`O%3! z@17^y0sNhN_UzfC;yWoR7lGSZT3M0qKKc9zkrL017@L|_x3*&Hy`4<4`?B(TF?!(akRr(0xqzEbe2??k}{&DmY-All& zVvY^W5$jLOc3$3r6IJPPXw6VfmiFdC7}g$|s;O$!wUmgqiBpps|vV<$kgS@<1 z=-@KpR8>EocNRIAf;T+M21r;Ia#IYngKT&_z8XDVE-pur&%=#LWD134cW?3Fa854k zEWcdp!`%_|zH_4nOG`_%y<{wjZ#;PF`3P_?Ob|Ih=$>H#_of zS@FyHosyE0qwTq@;^N}uUq74*M%)6TMXbvg`X!Ug->j}5iC62nQu`Y8X7Pa zzA`C|ysX+-^Y$HU*9h!Cki_dw8-OAvM*ge_&-TsTssa&}q3Wi$R~F1Gf+d6J$D*M1 zU@Vp?fpCl{v_C3d$7FhD1Fbltrlabl?!hiBZaN0p>FXX)f z{=th%$aeYWMarcE#uQ^~>&tLzva++^2;g~sp2=M}l5tjc@f5Q`)c*eZeOq7OfAomC zCWeWaXIcQ~?#lJ+TM^iWx~18n1Yq168q`-Oy~F8|)rDT~eJ9V9UeUgNB8cVOxpNG= zcW*^9cXw?*3tG=1A)%eejvYg_1c3OFBV$L1I>I5N?+@=hAi)4vuiqhBj(}LQbHDgz zSY~8lp(uL7&djwEk+XAizJY;(r=ehMeZBMZZOQ%OrMq66D&UttM=+tycP#4>9x184 z(A&`64Q1$uqm1%jbCbR78%y;Hh|6yN`I(uQFVE4Z8|FR=n6^P)KBta|`A?X`P4fr| zY0L0cExd1YP7B=&#fD+$PJ*wmgx5f20adQUO%gh#$k9<35GEWxWNB0fC>tBIrKYAn z^7tr@l83^UPhJA?;`ucB`^BZ7B#e|v!NcrqHJRzJzyRy#8t;?y7eaz2`sjb?#tm~- zf%*Z9K8f7@Xm?CVVrn@qx-RPGjl+{ZD+``E9d7VvqQByxuCDIVV%5ejMH>jL+|uqc zKX*t6J#G|M%= zFAd%TnuP4<_j-Q3iw;Q(UDyxUS(s1HFDWzoy0EZtR7^|^*#$)pvp{z-tj$0XP=m4x z+&P_#W^dJO-NCK(JAUu?1J2jR3iuun1N1-s{G+CHSYDq>YuV|t><8>b6p!h=a~zzI zyZT2XTbh7|1@=bHZsW`Ikvlbz?c;#DqI-WPY~#m`s?ok?SkRDO_;S+BuUjWWWkIA6 zI-w9aa~im5;2Al#F>%$tn67kJHm+6rT|gd~1`O5h0$qt=QDJ$Mx3;cslUV3Te_I=? zb93se8@6Gwz)Q6YB=O?wy@8ZW#yV^T-ai5;{m&`hm8dYH6mBbAv~mo0`S4`IojY-W zmo~lU`w1KM8e#`YzRnQ}LYKO_I)d0aceW2RAaHoL@Fc2I9JAg0(l8*HRp|Y||K9Qk zX%oiA#&<1>NtQR9l1~JQXX~V7Q-ea2GV6ACotf`2E2;Yo2l0tZ>!yw6t}4U@jdTzv zY=90?Qx8KFbB!6-9zliR;^IOzGDPQGf^8Uf>Z;wB+mg?*e|B0nbXfujDCYh9w_nrT zvTfV8cAJQNDf|N+5^_jPOkZZ;c^KtM_(m9%1DE+pAS4g$*eitG{dKA7wwzhu6gv9WZR!*HH9)~3U;K#)Cg@7i5hwJr-ZW(1UE4#ucW&8&J8|L!l<)g~m3N?a(lES^2^_CZbd2ftZ7l9~ebDxcw#t<)CZjl? 
zblWz3&16(MHZ~SjveeZXvUP7i`1AA=r=C1{vUOk=h;rQk0+C55bxzVA-FN+TkG>bX znFdk04&i?n9zD>F%aOz*zaMy+@M}Y9{Da(F05Q@}JwEcBn1)An{`^#7(T#t+0Oz;9 z?PP=+v2YvmCZtD?_s>{aSRyahbt%lvZLCjk0C|sMzsZTtf)ujNaU>N_-Aagb?|Mk* zvUD?n{!2^Bd_tM!?TYvR-d*nhl6dULm8x?zuf%U}Ep-UbXgR~{8{V{4n>x7H%kMbW zR)4lsJAf6d`qB`bllRZ5&V$dP@4R`Hqhg`QeHST$gOk&*nM1N2dC?U|K-uaNX!rHmy&QBLH?Rix=EV4gg)+r5Tsg-(;YusF;Z(4uq|E zMpVk%*V5FS<0_ZO^58@Cu7lb0yyJk@x=()tPLm|waQ9Gz{D0g+w+~jOM0VQ;K4m{= zU%_dGg$oVSPCFwUuHLw@Eg&EOQ6!NBe*FBIaO>8ErlxJSo;PpatWIxz<~^VWNee~U z$gQ6T#{`JcTn~e$`tTJkcsI9?j~|QsPWA0a6DFebp({i6;#Rp}6ruwen(fzWuhLo* z(5YsbQFQa=@B13gfusdExf-vmusTCeQy0Vs4<`^G1^px;w^Sy^?@;GJWBUwvi3n0V zs=NRyL?w*1`7Y!v@GjCaBRS9y92FMcf-4r(?4IW}p8!@`aN*bmbUU1-Q%Z!4Lw zG9Rnqs9qImXiL6qeCdvm0Lb0U@#%0zJRyw9Xdee}iRI--kHiMW%HXWCv9t36W(K4S zv04iu5ovp!?%l5|>+9>uGCn-wI9zMG%C_IkGlRFzW^b|vv<8GWS{l-}4H>{YyHxS!*GSOJo(P>$7L+=$}0J0MhE=;VOLqYaK7_Y9Av(p$Q(Ezc`wI0?CR7k*mtw!zYRG@(myufdSBXWga!ys7m*5Nf{PKd8#I zkAkAI1wmVe^76i{#lt!ZWWLGUPkQPLHM&;)-#&?RuuJ9GAN?0tG~4_?U3o}3dr+u7 zt6Z4r*TV2MLAd=Z5bIcny3au0R8v)AcE5(V@H9LeZJG6b+d=i>Ow+~H;<4bxU!?Su zlAH>L{D_DM@<(Q)cAL#l1E%|zhLiLN-O9g;YFc^&94G$|CSc&ukL6kc|JCH(Ss%~& z@MS#D!jGH_Z@hl}cVqc+U}Tq}_u0mOu{lX0R-ntF2g2CI}cYQfTqU z%NdIJ^=oe5D3kN#uN_e|zs0wvxU&SOV92reS@68uprr;yZDBDei4Xt9&CgW*H>(8e z0L5F0&|T`2**pT3fsseq((+dG2;e*@NT}+^@KtA5D3exVNQk=XMCldb)lJU(42JH< zFFTc8HZe8L4xnsC!2CebjB|U(pTYRSYR5&3ICyKJx>TI;#{Xz1s!iL)))a@GM;PpX}M*W!dV~l#C6WD5^j3>3MK> zh)n6^)b)fswLCeZ+ z6G-RdvoLi)47gk*RRRPMu(cAi|FQ8|oXo_sbS zc4moe=R5_?9;Al$vTl#H;iICWOyF=(#zyzp)-F!=#z57s0cVYO`RrB{-%Gf6Hf6~q ztgf7^z~;(x3xE%S5D6_sdwct5fbr0GM?yh{g`r*wfC`|~sE-$CKrQ$EZipy?U>-a- zO2?xNIxfKNZ9LWR8W5|YVE{Q=>7F6#Vu9YD2twV54j z_~-DYHWrKkd){=9Tb`7bW=9+U#YOh@=DMd9Q_iCuX11!&ygrG!ygy`B=G&O>Q>co@ z*YNb$|B0`EBR{)`DjLRFQ(Di-o>_Wi`U4xX;9++iy4O9TRjZF@thBXv?&dzZ6J+FL zY>ZaGCjsuR$&5dFJay&9jT@*m&$zucWV#{-vN`e+;B6#mexHAQeT$qm$uk8NxMSm9 z2uN_31&Dk0?9o5UhW3JpOJc6>ZQ1Qz@^2a&&w&6$p-}Fc<|SOc>gbz_yy&w?l1u6_ z;znIOuuq7Kh;Cnb5Owfp5qVHKRmNx2*>A;(U`Ii~(E%Dr*LB67gfI)m)HhX@2yam=%{TSq{YHHc_md3_b&A3}V z3q39GnG{wLMI)9fe+w!voC}MMfw%uVYh3xxG*8aw%VywqBmBlYT{(M94Ghka-raKL zj0;BDXdtJ6bGaqslQGlxw+BaF<`q_=d5@vR`Z;9_Au=O_r+oFtTN;hVeCNfPUyoZG zTY=eYg-!~3tid2#Tid}NH6ST6xC{wTAN{mHMR4z}D~DZF78mpClL3lu5H#e?2%mt! 
zrob5seDGY(Oc|WjgPCfVH_aTvKM1#(pti`zz*#=T&#yXKTw?FwfWctSZLE$cSjLZQ z`s=2hyc`+18C(;}_Y7aXdPN7p6+rYKLe&-aBGN8hz?7pSmuzu3J zb7S%R4sr9ye`o4z<93+k;hIL$!sDP%mywejdd+T2aCJQvwl;6Zj1jRZ&-p1i--6eK zA`c}SLBMr0+Fv0Die5r;G7Xjh#daIoDMfB)6$6jmpdY+ioDbhq>cg9Q`*zf#20&Rw zMFb*j^W8zw;cT8$-9bgd86}<$0L28+`X4aW_X?Hggik#^J@fh1(c{Nij#qwP&M)6s zUqQ(`I*BlwPv~|4@Z*t@DSees0^S<1F(XiSbJGLdD~r?H-p~9rFfyVCyPU_z$Etx% zjg8y4Y}q1vW7`u2cBbBi<+OA-oUm8lxfNNjtcCVN@p-8G^1l_4!*Z*JlBRI`!1zLI zuk{mp0XP?a>lv#n(^W$NuaH~1aW)~7C0p6cghWKtbaZyv1by8D&E<~xt~9(XM3!v8 zl+cMP5HHn|GQF$T_Ii1F?OMO{;|B#?av$khd!Ay!5aRNwOw#nBRV->ng{zW%eQV); zqH6Gdp{D)8;{{uB#InRpu%Hb>>onS3#9O((>^}Is=Vls|tMd8IQ>Zz{&CTrqpBz-0 zO&IiLh{PVN6lw2=0hGB%o+HBz8TnO*=z+A~`CoK7C^L@--TdVIClsN3B`bh#4CMFL zyLk5Bjy5Xzc+igLY<5y6I1OIa)TpRuUAS=J^T;#f` zuun>HrPvT$a)ACMWk%G^D8{F*90vCXu;A6QD$2^J4N}o4xEHD#pnH^d2ng7QIFd*_ zPykS9{44AaG+sK>VS7?R!FB{syzD7=gt*sftq~Kun!a=Uc8p)>)Bh5GqVhVvPeb$q z_ER-9WPyW%dPMC6a{qYCpkY^p&K0dd$V)H@x8|9NE-f$HgEhnaUtQkdOd1eSr{_jn z_epzk7$3jZDcS~p6%4piBA`b^J)i`X;kkPCD)^#W(wKmT(oU7ZK(ByW9MRZ0^YfiF z0|UcZlrZV92l5CS^VqB`KD223{Pqqx3mzo9KmDyL)XW*_o9BlIm?kbadOnCpjen@` za7xswlo?v$PRr~mXMQj{Lh zW)U|BWd0W**Z0Yg4}l(_h#sVe8DkuJ#h zUqoRDr$RP;V-GjTGLo1vNYa z0$e-Xafh7fQ`Iq%S535u>7S&va8ZIt!ekS@wiCYJIjVbb0{S|aqH z*Kl;UebC?E?+hn}aQSc>pe{+EpHr{@xfaMckBfa$J<<KZh-!eZHIv?L%X^Bow$6xBqG`K=&@r;7xxOkxi{soS72rOW(>;uFCb}t4#s8hyZ|LeLz|3 zdgj?1J>FHAB3CB`R#kT(Ed{}0^>A_GBhF;(snf1wY!_HiRbqSn$p4yrRw7?=%?;~ zDSqbWH$S%k2AF)~#zyk_ zE6H9A4Sdq4bV2%>Ux_UEgT5S!t#nLQD!rVXd&1h<`fV1Mck&YS18-t;GTS+us`>V_ zs)ia-jq^}no-E|%#Gn#c!Pku#oCRcdo7=!)rnx(-d}wR)4hagX5b_&HJUf4cNU-9M z0t0Eo?z|2Q-ne6|s;5tX&WGsUt0g}r)~0h){HrN^MIv|QDgoZ1Als7D;X3q3QC#xD zpTVmsn+8W$RbQWZb#3jS_3v)iqxb1MtCWW1bX3$;wc#F07RUH~|M0Z~Liw7gZ6(J@ zOBzvQyPkkW1c-*I3gKLE!-~M03r_gfw@nSgRbO2hNmQ{9-;l%c6FUlKc@K2LLUa=umH`dWUea}Euu zd?1+yp1F64T%4HZ$Xw13*WS$Tcv|XBGFUJ1ML*sAI;)ohHYW=QMbe%!8))T9*J_sV z=&UBsmcs4YgCAx$ic_Eq$;mlt1;`B$r3&P4nD>aUTAweRSy-rpYz2JCSxC_+En}3e zjp@WvziGo1hdrR?qe4Qp@QT(4CW4^HW!<|+7kGuE0w^RD18v=PAX4jw2pxjXCfqZO z1sz3;g}!c#w}1BO);=|D-E52jKc+%>KH5AtDl2y2A{fs16tAD(X;l!XF$tFi5o7NZ zxVa8Ovj*0365u5tP%R+4oL-t4gqra3@neg)SoF z$rCok8fqj;RPD!+7#3rgxWU%!|NA}09(u#!23?~%R5vcU$?;D~sKYw_FpzUM4jKB$ zThrcrprb!*rbffASwxO&Z1WOo?2kkRt|>&I6Fgg$>!qv6Bh5e?%GPapBV z^myePEaq;k4JMRtTF(tk9WnCBk`FH1?#J9i|NCaSoN*?=3*tL_+cbY;VQ%GfsI-7&?WUAnR4a!l&N$BGxI87l0E zj?y{X>5Vk!__<60o$?|t;erX2%zQT3ktSmRL~4JyFlKJ~|5=4Sx*ySjRsTFFU#RHmXTER9&F_baE}@Gy%IMkyfBmzLXZ0N=J&R^--NmE% zBD^Y9_J5yw+Mum#BbEKEJCkS!5@eTsVj&jr2NQ3 z(<9e!X#5;MK^u)<*)U~k6*Tl>b6D&UAeL}s^)L&zmN##V3lN)RI@36gU3+sTHvmDs z<+ZS(1q-WjJ*S>ONvTP`8%C*}t8YyBo?m5Yjk;6!?%AVJ!Sr&ULw-7YEyr|h5--;2 zeZj*Ks zczu{b(Kfeg4cuz3y+A(zV_&8U-nh=Jogc1le6`>eI0Dx!l*IU4({D2mFOlWpLCmuH zF<`7;P_(zV$I9oNd_CNlK$BuI*b)0IqNwbDu6%wcp?v`~sea-4IK<&n(YBtao*_4A zCsetn3vw;36+}e~JpnMT7D6dGGWliOnYN+-#p`n%>=P$++1jeY@Ql17Cn z?7+mib^G{vNB;WrD@#i!q+QfBH%a15elQ=LKxFy$x zG14Pfs3sBMGVe=rBpuL=yMzXTN2aB<^#`ZpdvswqVW%c4U~kTF9H;|F2g(hnXyFlBD#C2{ zq2UH!DWd}2Bgh(Z2A%NAt*3zb@EEuya)2K{wlF*U(IXdod$`KTLw{%6hYz2@nGdpb zO768LRR0Fl4F-ly$i$}?>_DGGKN33X9?vCz;g8?s;Cw&dM%YX3n#P1#^f`uJ5(S%E zFxqr|1=V}etFP3q{dChlhlQ~>*94bfOagT;Af=m6`v8EDVm9F3!8AfqzzlruKF-Dn z1_F@qqrjx!|CXJNjiC{Pz&zFXH4ZvO7%@6R%*Udk&yTsLQGqdSepq+IpD+gv0QCr> zj$GklVWC7$*~W$dAKr^NZfcnO`BUy~{Li6P85kzy#qseC(~66Kazh?1mvQ}&bxgL- zbU`tK^Hy(uMQu>Ue{Q^s33x2= zcDHxmA49_j;v?FKfnq7NrmH2qMJRmo&A z(A*w@fH(MSs6HN@`2*Sl<@fFcIhjN=pl0 z-w(qN0H)~>5a-cJ8ssc!bBAEq@$A&iOrT2cW@IRvcXoA|1wPCjcKQ0|nt3tl@=V`j zr?<@e_R&BD0xAj!Rm#5>$%%Sg>++-fsuXtix0(U1w>(}LwE{i9DwGZc)ldURZU;n; z05^9bY;Bni`CkVDBb`7)&zDSThXagG7$Pw7bP=8gP*WSNJ$tPiTVduh;mVb>@GJo| 
z$;imuU9i->%unPFTRIGbVh>C199D_`U1zIcYjoqx9sWt5lWu;nLXQi50T*`lw-$ys zw?f^Ut!33i5Rk{BVSths1|@)O2IlQ0>=qp|RCl=sIN_+SsOV@OIk{7%sMHVV{Z#2R zOe4X=hvCl)7tnD)Ky(?9rMj!a6<`j@0EiptW}n#(SR4+g>B{B+cJqv z6OhgV&(zW|!AFQC<3|MrYtSYe+Bg^v%~qm-Jj2h7amy=*hcDsXU*0%;6l^pQ>U#3? zY~Px13!Z?J83~qthDDeTlRO7m36C*&4u0#efoEW7u#R9q6IZF6{t`Gh1OZ_s$}pFW z&cG1QA=oQKEO^ev(a{lk&nE{&6s!kK)YbXT5Wa)ozHJ3X4`#1Q zU%a44K(R{zVUwMmeRz17VOo!%166y3W9`RVQJ6o(^iv_k0>MlhNsWW}f7^(0-}yK9 z0i1<$!Wye2_bW+2=R8ouHetVh=FDn< z@5TjiB+d>Dt?vP=-cZ#@{V$*AC%FzDT&c1zR5*6*`>EUwkua;L@!=yA6RSi3f1)SX z=Gwf+_MH;en38*a`Wx}b^tL96e~MoH(hpRty9|wrjx1_vJkbFw0p`Gg2PR6*dlL$#%fYlH zK8L3sprVfb`0?ZB-hEvK=i1-3-YtJB477#^Y~-K9YH=m~IXHW71LvMw?+tbJh+xx+ zr>s6DWieldX&;e)V1g(f974f*e*lI7(BTg;2{-n~1U{c;Q;<*t>O=*=1s1%Bs2G8~ zzo*5HE*?QGXFwXS!hxywi_6H&MB7D;B!NK?laRB}H=^MOUdVm2zDF&exSRu){*)m8 z&AWF|%IS*w`d7$yAIpIA#5%oUTl)3uFgVfBd@o>jD!ubc<|!G_|y9P@M&l30su`M{nFcts788Ib|cE z2txY`c|&3CyIzjGw?FWfU^{yJ=nk4|;pl)wjt=hj1IbY^JPIinHE06w?CVwkAC(3a z)K>!owt*+1eS$&D)1c@nQ;ZD^=%SR*iB>LcLdejcflqQfWUDO*>b3&l0UNfx(ZI%r z3$UOdA7Q#;kqb`q+yzdQE(SMaU@FJWX8$_&ze8JN@}=!Qj6`y{zb<4d67?%Jus7YnWtazYalq@Z|cE!{9m)IpDhSpFDu*P@aJK zroay>0%{8b$bvTQx_3@|9Q6l&(WOA)$Pwc7unQDIvcJEYy?q9G2M`AUi4cj~9H2C$ zgFCKz9MIGI`+D5q(@-m1O4v17E_VB$qI=oR4@NCM1})9lRctju-7OA40D_zwegFOu z(EQ!DIj59HMMk~^OB}q5imB;dIB7Cr;un681Z5P@NnRONoc;_uc^bNJ)F6kP2F}j> zIfZ|HBz&D{PU?PSXNi-8n|Fw~$!p!10LCrU$}8O;NDseUz;*6X89-fhY`)Ys{F_V1 z0~2*kO_nLWR2c*HxXQJKQ=qrH78C+?0O5$71xfkL(qK$A6&1M>GrMd;c7b2jvQSt= z#LHK1oL47}^z`CiB(G7&-a5ecQZD#%jv8vh1sVeD+|0ot=ihWUNv)s}s6LC20++DoV*hL-%+kT>PDo0kSER~V z!>2`Hm9x7Eu0!6VM{^pfalZU$-82CkAH;efq+dU;8UW@fjA0nzcwJZL#!o~KHCRUJ z5GN<6(T~OVuW*3;ubi&`8C0fi2p~6@Dg7;+ATUaB5C*z7-F+@_9fn@7aDdNcH`K1u zpq~QB-JBc-aP;bMZ8C>pP{;9q9_h{D(Q+H!2j@`txsYvTA4lHx3OMujAbJWEL<~7w zt|mK?6(sc(bhNcsBWMB_^O; z%wK~G*8I`wbEXuOjf6*eX&-je_wV16#q2hrgvToQ0=M1Opn#PEq-9h(@#rtN2=ojK zI|;u%05#+SbV08J&i)$UPUsMrlX(Tx)L0N!0d8{02OCd!V~!A?#{DY<4Dveoj~t1w zf?>-3hzFIwx5cVde{S9~2_;+L?bRx-|IfDADt!9$ds`N0c$lgZEs)I0ZSb`mo=?%7 zld&{hqs9$ikikFp`&)7?HkLkAhOXF#gXP*_TR%ip@1$tsJUm%Dm?+p<5k0hc7W;dI zagL5U-!>4wA+HJCl*;XJrCYai{=d@RJeLf+^jo)cKHukj&hz~DbDis4=hU|Mz2EnH zSg-Y3Yu&tpycL;n%&@%=W~(gDm^WjeB~fG^~kd+xP40mOZz6OS&6LYt7!h zH~-t+807sG;NIhS(8TZiCM{;Qo?)|pM#ldTg!%2^alw(bQ_5rCk>!)`HRGyp^)B;d zOYYkB3g?bU81l*ZKixEC6n{q%RLR?NeJ{9QcOETkI1(mdehos0e}9bFxSP41*TTNY z^}Yf(>{U6GkJ&|h7c6|TX?l8U>exD2Kb?IDwpelFht%?>@Cbpw0Z6@h$|*qK#^Y>x zfX>ICRonhkCk2ny?J0Vk59i00nFWE}8f|{FDtku9$8}KxJKcDC(VD~%f4P6rC$V)U zi)$VSwtd#Py_Z%|Xf7L{H`s0c9k}wXo?MWBp0mdf5m!^hvZNj98ZLNg(y~?K^NZ?E zRgRAvgWD?X;b{q%$KKjU@+4GoSxdct`+^^xI%>=D_ryFyGl>1tm$CNWo~`Y_m2azQ zG;RNFHr+e$^Jk7RyuR7y$5Ol*mKP{@krM~*<|TT1HGw79Ya2QL23RB+2YZSS$LPH% zw{umgbf)i5b(m4ZF(eO>lu@9Hh$1-m=HMR9e|9lp=E z?mkJ(+P}Uqcb6_SEDL#=UU_hj!R-gqKkk=rIo$bhlKLcja$Vr{v$k&^1trx{O&Hq6 zLmJjUy~BSaRi9?yu{TKRN!Qc=&;WD8?WaWbUT`^QSDj7 z)%I}Mxw=2QC>QegdWLX|k!hyjzb_?~p?^5!>q3{hiORy7G0;oCzkgZeiD{sJM^98o$I%@ds@*YlZ3l-=SW?zkD*g_qNS|M$)bR}CPO2ksoA@0r9Xp>W+LlY4dz4syMCf=s7>@*yi z*kgA<>ET*dRyOh|%uG7rysJ^k?!?0lol=%>n7@DjUOzpAc5fMMZK2RdbpqW5{KOLO z1WC`cVM8^#@Xejxk?XluV(7oJ-Y+blTSnO%RMKF7FM090X$Cyo6leCMz;g%I(hJv*REe5MKTY?OOcumtx=o@FEtj z6!a!ncwH;dI`^72CBRUlAiVf@I&mZap~&iO&@kK=$MN}~63yt=XU;r7<2*E(g>P^i z8YKvBwCIqF?wL6ag9N%tW;Xfw=E3!)wMtGZZXz2&Nt*^TH%ij-(sQRhd-hyq77-G}0)8pX_s9{V&2y6GSOa%GtqeF9MhuiJ;tNl-U7dn{S>(&`z z`0wx=a}<)Z3)Io$JQhvc(%qGoe~mo=E2SI=`VYcq`ll$p3r5BgP^4DIcIEorSH9Qy z%*&vfr~uDFw?>UMlsBqtRbWNa_*y6ly0aa8gx5~$NU)4CRl7})$c(J@hLaA$wH@{M^(hiQp^^J=r)e43W6&aF8 z_jI0^o2>Oe*-*9DSdShjd!pVz{tJLu5?v^yGwj~I3J?a-7)MIE*UWgth?$nKW1g+lH+H?lfv|jFJA-6HK#+kIPrsL`p~D3s ziD#b~^LZiZVh8Mhl@%3M 
z1_qzWC_e|ITM>#o3c?O+cAh9#d2VKqBs0_Qjj>v_Vnqj#r4}7(s=Yxv0c{AVlZU5h zezNnH5=>6y`~@#~sC8vR*Sm&rsSUQGnk2=_!pd6$4n93I^A6fDsZvzIa+H#zhYP21 zf;GHg_!Y;qK4H%#uBM$8T7;*{ksLAK)Eo5qE`)6R!O1so zIka$jX*T%6Ll9zm2XA!nZ0RW-9vhPgFg@;=R%L%0u!&#VdKt_?RoLpIeCGPh8^ID| zi;$6!xV0~I+c(3*$4cR=it$;PX``Z$VYI;Z2VJDY>;T5X*woZ1Q_*MM6DFw;aGyH+ z_F18&j%mHWHfAI0tr!49lKP}V!1leT$79y)eGV^|PoO#I!lbA-f&TuZllgaKJQUZ~ zxG8M7r995vl<80mis$UY{G87I{lbI@g(9h{8cQxcbX=+D2Tg75>@q+d1o-(`Kma{D zGJ@5M+@Y^ZIECO3;$FYLs_E3LK!^`kv9j9WC9xiZRO}$qJ9mbH$uYZH0EOPP6Rk_V zu9rWBa1L8OK#XT_=|y}tIySvTjlnlAZD=TnaWk`X^?TjAH*SNgb0L4{w{vNbRH;HLE0fIz{@TWI5*Ltr2fb3F~EAk$HBQSJ1 zf~8%lpM^oRVqTfuqTp`y>K?Rm@*mIcU4BLn;bQpiR%KygW^uzQ$`XO8!sYt;PmQ?j z{|{8KKa;J9nZ3#FYe3MEr`&66LKlvoJg?-+<*Q_&+b-rvG>mMw)I!$DlOEW zH#$u{o`0Kc2MTQ7W{kUDqqC1kA|Mqu?|);KzYc4WtO*8Rj#Kmd)d?w)@z;*scDH8d zlJ#>$h@B=a%Oxbo7Hqtj+6u3i^!_m=J2~DHP|>^lMXp-szYtlLYUUnBx0&Nqj$*1pkZNC3HOVw_h)rH%S#Qqy9mTCg8Otbj5WoIX|jx(;h z7%$~pmlk`5qs^vwp<;Szflt~A7r$BFJN#6efB&`_GCZ!vvOWuE-dIy=qsjQAV^7nC z+WhOPJMV2*RZ^O@fUB5B{H6(wGco9#w{kbP8(*lnu;f2)$w;9de4(J)_0KP}@=W=T zvpz$@Q^c++rvrMqay8%wBErJn;;0h9sMebDUn)!EideSBi2btpWqO?NH9|W`=hzm* z+YfaA0+HsoDjuj`&xY^XEZh0@&)JTSOH2E@Q)Q-K=G52Bg!;ax`wCPx7)dFL ztC*%#e^e&st&Oa3?Guo5$E;vub9Lc!<5PQR-03Klq(0O@j^Ll=!69XL`0JK04q#8; z86G%y8JvL~b$cX^z6>D3V`2u+dE-z&nrZ(U!XhljLZkorAlsp#{10Y1A;hy}%lSB> zAsA8=OObLuzc8hl~3GTGySQe}*RhdkL@e-kE)3xo?i{ zV>#da=|-EM+7m89+qU(ai?7lYBAUcAD)&DNII*7FJz~n}dfuggfK^Y*IUMwE9|+|S z9{1RPmw&2evemsOT3l2U`kW3Ni6`^Sa?fo+4?pj7M(-vyx{zVgsQRf&kS$p zW$(#D^o`8KgjxO4qFWnYTDs1bEYJ4-b+CnYmY8L1zjD3vWM~L+lc6%vwxDiPs#zj~}x5BQs`$JpmvKTk$jf{Um#Tk1j`G_yT)_`pn; zJzGCZf*ldHRI>E)^PPtjd=C*fiKyLmm+fzLynrbReQ}7&9_Z-l*})eA7W^IbC)^dy z5Y%Jm?}l+WJTme<0IgLkz<5otkS2+Y$-g>wzQxee;{vh}8=-v;+Mpas3Vj31^)XGk zB+M$IQPz#zaRZcl7dSa>I-1s095@v)$OQ4qCYE`awlMUC#GP|H<5@v9D-71`bGW8Qw8 z&!wANe5`7CW3H6Wqz^)8i=nn3h5LJxgI*-LasVdinQi+F2I1=Hf}}i2qb|&D?`CwFd*NMF;Tq3@rbMGJ}d%W9Xebw<)5!QSZ?5y-m>!b7onpC&Zs_fV&8OkPrxCSV=3Z0ch*;aXT0{$ zb2X!rabfnu1q4f)JZl)>-a@6t@^vzN@poI(S$lB{!ySVQ16x(^&KRzmY1Fx86^apo z=Z7~HTE=|@@|@(fX+XOH{{B|Gb}^!ud#~a(eLGWtfL_TLTEiXl^~04PaYwnixa=ww z5vPE$xV=f8jK$$g#k#J5U?+q^ihfWM4Eroh_$Yf9262MksNok^BrKF*$RkH>D~eQA?eeNWZ#@6w!$q*L z`3WbdU%@+`27SHw_M}O}Zrxdkq7_r#^ED+17$%)Px866JEmi&7qplz1c7&=dHM?U4 z3JTsDj+v>RSSnNn=;zbW?VlZuUC^P=0>c+0a)zh;GrX(t->b20Pzt9cKnyiV4lSvbhuUYzm zSwvV9uA)TAfty>Oky+I_Xji7_sBVM@GXJO6>9+sKkKa1H_CX2{9+7%)6>1|e2 zhDRQ{f5qtDhv3#^TD@iK90RxP*5c9-oaagV4Ktrnqw74vGe!_AGd3Hj;7b>JqQgpa zbQDv?8GESd_j!E%9)vNOnivfyy^o&MZu#J9ayRVdC!HkKjgCLniQ)=xiKV~p`up^_ z`bI@lBaxHS zMl)z;-&+~Y(Vm)~?ODyoD_=E7#GwfyZeEbLtL=S7TiC);Q`1V(7`IDE8I%^E8D&mZ z{j&dh7&)#onoLVqNmlbKFU*eWXlqM-M_;BYWl@7N{G;+Cs<8($nsvW^I=VmvB`fXp z+yk}wBsTh-%;b*4cNK?2avuz2ib+uGXl zqLLR879M`hB0zB{?0JgPWDo5(7A5&2>^OoB5*-(L>7>f*47AjHYDXcJmZ4H3;^)#^ zkt9o}6i$8TdkL4;n{k7qX)4r_>B&$f!YCgu!{}v?=K}w+42TWLb6i)PUk03lBAk`zN_xPh7IU9>% z>G=|`%_(#5w+G(s+*A`^X{y-y4pA5NCI`dTFS>KdCdVYcYz_BfZ$J8lJxcG*e8h{7 zr8}F0^t-K}%pSahJ*~4g4$XgZ(dmYrm!;D3dNxz3japYeefqT3blq+`>Ut~m(aRI- zk`j;Z1gJtKy7oKbaruB}3a|7$do_3*e%7~vd_8;JqAvB6>*rJ``#gTu>=`ljBFHc& zDvH1L+^7AdU0&Z|C$%u$tL+BJRf(=rX z4nH&1u8qMHoAyX=*>VFZ{LDo_2JjCyJ znL%9HqL&>(?oiHUZ#e(H5nJ{4kkhLb)+ikFI}+=H$C+*51m#!pl((x4f1R1RWBkX* z%c%v^Vr12M?b@|;(0HDmfpS`3L*j6XI%7|UoKj9kTAD-+I4^?uQ1W=ol&%$@w;~tTGwR__>3*$b>nA4QsYjVByDw+hP~{nqFI+mtzz# z>^pMztZR+F7YU^*qz3XM=S&mcPw-C(PB!%DyztC5IhMn+!Oa&D64`Q*<9xSNrk|c^ zdG%_IYR%#dOE{l(XtL*eeM_1uegwv`>5ye*SRFXLYz?LsRajG{Dsv*S>s{2aWRCJax9#mXwAvBYkzZdR z;Q^LTNyXye>>chYrjt)vn{?+gP1X~?mpF5*-8Yo`W`G+ZGBh~X%WTd8a_$h{9f5EO z$T+BIQLhM7Qkm)sA5riVn3575(vhfrK*TLopkXf>c 
z7f&{42V=?|NB9eqI4V{*FoL4h)y$JI9+gYs_lHLy7Rb_Ss@NUjJ z9lKolHycpt`=md8kmdFz*;od)!ea8%0$HM$L;{4s99CH6ctJo?n@g@RAFeqOKQY8{Hg9$vJKpIZ?Pix)4M=PO<|h5Wp4PAby|<4#zP_|1 zATY3-+%2I^L=;v_xEy3zLEKVr;&!d-zTc(QOtiJJC9?&iMi&wd+(KQ=F|55<0mme% z(9Tek)XgM{#s<{tdNs=Rebs$Et0#BAP~hMgUd7m@UJ*EUVNELPcCufP{oA4xLb&J9 zA=<9GzGcIOADf_U&E2~;H@B{4lXdAr z>Ai<~^r96?A^K|SbUo40->4;`Npn!6C1&Y+)CZJW`3bX`ATkhKE+b>x+!i(u?!+cV^7BPxe~@`A~OvLi^Q-1H5z7^Z2=({it|I#45T!vIN&Lf;a_|7kuT?w}*iB z+7gWCa$j_~&MKNcwOhi2?8gcBkq^z&$@o|_7tH^QD#Hv>WPEB8ozxuk5wf60#0GRW znvf;ZRqJ=dItOuZffcd5^Jc!!eBnKk)Iur$tiiOQYovn!mE8N%&d!dnkMz@0A-?N$ z>6Xz%O<)BxD^e_V=t@XLFMjhTU3YR~m0|{N5@7fR zwZ%cuplUg%X!{gYJ;_T!u-F_F@}*G6zUk$FSkl`x5#F}1qa;PSdia`XtS24dts{S; zwE~YVd5$fI46&;~6-!pu{jmMwu-accZ!iVNvy_SV{LM*0H0I3gYYfejjPNy-a7yu{-a_R3N!mFyoy@$(>MGy8dpdHz1BW<-@gLm0 z#`}#K+cA>bbOfPTAPWnd9GsR1#(#c#d*mgJ_8X`v4WW?Rwr`hxd^bxSXC~O7Qn!|L zn|U*Q;Vuxf=~z7blb6+^)W!fufLlIY$aO;NaUa48qJ@QpXMe=qa;q~u%u@jxtz0YUy*bO0C1;>i#C6zU zD=v>LoLf6AyDX~f-H9bUh}3`!-2ivdBS@@p9%??47~grKJ^;^%kXjvxZ%eWayP6K0 zo>>AG{@vWnLm7YZAoh z>e^z2P~}7ygncD<_dU+ezwSWHc4)(rXE8T!-BMi-*TbtitDY5ssWW-<1q{!9o?r0P zh$0`NvHpCsW!?r!3j%&E8x$2r#4kiCi2PKdu7Jg~)^{e3=$_x?i1eLq}3pP%(8PEU$<#&Xf1U3+EU>)eyAwq8(C=xRRsebJ57k4qt zzKMUr>q2&xItnRAPI6w}idxjIFC~kfDXNLParI2iNBl{EM-Z}wtD)?(@(GEEFt%(P zexf>e0}^pT2h|W4_++kKy*eCyzIBs>glH=s`O>NK5HRC!#9o%KE8sILg`++xk4x`< z473gLb^!MEXk}ftG0c4`FmtQQwj!(1i3PAdjkmyO%Q1Yh`dbqDUtC`pr0Di8$pB#T zn%UZMS-PsZZRVoM5{J^8Yf0J&8y6_VWxkN2&jQlQV_tQRfh%@zI+-N}42$hvv9qIN zvc~7DDkH*7-9GJ?8^#jtfk7|HAnBf}j|xO*XiKhn{OFMs3Jw$$BHI53mR+Dk3$iaXeL}oGkLj&jtT#@Yp4(1ZZf)+m5A}M8}*P@kCo6j%4oz3}rhxN8lK3p*%kvvoW? z?%dD9hQ}68`|gf^wnXF4Yi346$-6b;AENk5L!Fo!kyChm2wBh}gZZ2BY?5l)+wIk;K-p+sZ(~uPurov{ikXSpBW`F-4|`Smw+=2RskD$a#j`> z7eC3So=P+l6&xH{aC9L|3XluKCtm2kmLoW~77}S0LLoe_Z~{g7U0mLVc}!o=dsVJ! z=(Ht$n|^ezd!0#(&)g49Rqv^cB^TQhlb(I;m~`IH!SUQwK%##zBmcwWt@a-;^zGVM zD8~_eA*ry+R?%TB81|>zl(v`C>aVSflLvjdme6SN01q8pIw?kn2VbHyH-pLg$Vh<0 z&+oGxsX0WE(~>Wzk<9t7T%UZe#x7p;)ADkG`WN|tBX*|;I=`{h_0AsH zDEi06*;WmTubYeA7__TS>C?oY8u2QQt$FSozOH7?rAJ-TW`cM3rL4RdrAAJJ$t4*{ zb%w5E@E`PbpwddZ#i^mu8wjREiRJ#qu!$j~_3v0{j2!QV_G@Uo&&>HAd7?l#!Q-}u zMxH%NpGOqC`i}+2>Ut}=7GJ?1+`oTBqRn$Ou=F~+px{Lt;0Z3Uwcc#Ymm6D+6%?@H zLw(Q%;uv0OmSA)^1Q9$EPW8!+5}?z>6)x&r6!h)}WFk39oSD1mZNa8AKv`=h zzvRBToN)2mA#rNnrv+E5C4V(Ow!A$Ewff`WC=J>kC#QZpo4Ie$lHc7Y4Lc(4_QZ+Y z$K*c>GFDbrs!&)cxZ^aN^MGc4g4&o>FKn13WB&h!&7RERc=5o|<@?I68f(KOPL9HM z_?rX;SBIl9b5@8Us_0RMbNA5p6sPGbqjP(qiPz9*j>GFG|LTqwX2lUJ7@{)L4sIrY3BR43KJ6Q#AJbo16C{qa%%7cIj#)f78L z1sV1#Zsp)eV8x~BEftSeR$!lLj8}{6WW}I;HNN4ebrOS^^S)-ok`7}b5>c1vk+<^f zpTx75xtxM3PX4?5SU*dQEj9&XWRc&(p(@8zB#<(A#qX9Q(Z1Hde*M#OeJdh~VaM}q zfxoU))bUWtv-7}IG^~#W8^B!s=aYZt&yy`qNJdziUYrPHx_@~4@jkJ|aYv*>IxeS4=?Y9XKXijkT3D`V=+xMvLg|{uJo+2 zj!r=1ZeE;H8ne5Eh_}fYO+UZ#LvD*8WRta{f(PBWwHICF4O8@-lqA~)xR!eT8q>p6Z;>?)2WS*QZ=sb%N{d74{p~!l z&N$oqFmfX97=3|JI0(9d{C*nF(h>izcUJ^i$ti>%20KOHKi55LtCRU--J`XG!(m-d z)6#C^hRozq(-v>+eEf3XneBs@YeY5Cu{(|fgH-JR_VlNxr-$FUQ-TaP66-;VB;vkh zF}GYE1C&SeJk14`eNYj$P=TGyh3YTk%l{}g?^>nXsqDd<)05tKMs_B@3!3Y<_d zD>I%YB<1A7z<}A|syDcvZC)Jb_hW?X5xl#=h)Dm0k)NMW`o>4uB~nygC^NtcJ62+Q z;Ul$SC~ZctcQO}|U55F;yA1X2vFoY+v7MQK30$DMs$J}q@?m@h!+j5S6j0zu$@8h27C>Iz~tn$i)(0 zmEGg?)7|8j6EET&#t)WdB;iTSameFP(peiMp((TMbWZm=bbIfh2qOOiJhsZhiw zy0&@O?LX_HlqdHY(_qXW$NLHXrOttM(N9Yx66>ZE|8b3if+Fvh9$;)&CQcW7->)Z% z7>PcHE`~`7BCsNKh{Zy^vF+qi=hLhM@{SDc=4n13^p>P?6`d#cC~W(*x~PIM+j_z( zv^bB==HzLKdxeZ^L*fQ)=1iC@IDPG}yKf=p89frZCWw;H+yDh1In)%Pk-K8P#`;$Y z44mQj?wxQDh$>CZb!g>t+6^=M3Fgz(&6!$yTr(uEmTLw_Km(* z(i`%LHE+x&<2leDjts5xXZhvh_#;lsi|beUN64n!aQ@@Qkz+R6dcfar@^MI-K>v*+ zk7x%oSkX_ 
z#1O~XA2#Dheqspw3{P}Jh4;>mTArAf#5`Zoa>BeQCfuN5@qKOL87jx_Twi{$c6$e=;Q9O%h zUx9xtzRI?fti8NzB4n=rO{Zg-!T7(IJ`2h%;`Z$#B)U6;{q%&O3LU7z1r<^fixw^% z0=Llwv;>Li=#<1p7mBvT(JZCi9dEnUmql#*r`+1YDne;RyQt{}wFjc*!Dt@cde{*W z$G|qqU&GJJ^2_Rx%Q1$TN9S12{H*M+OKYq69mYGoxnqAS12^_CorB$Ku=4reQX26K z{?P2!8CaH=oh>f}r5rXhviXAAO$wY7#n`~01m^DoL^e~(5zS7+?zC;omQC8aQS!l% zwr$cz9l=R(Z8y=No4$17fvBwA<;f-#)Yx4xT((E$?sc^2u~Pdum{& zpWoGMM_U*Y!U|ZNxA#wUHm_)BBLq4Q*2=63i>Vsp=)b{2e6i5m%WLpS*G5)Y(|0Z= zPS~b!VMWm?)7&0=W#66(@50qZn@slx@fLg6=sPv`EhSo6K5&lDJlv^}+$sJ=f^`i? zkK?hFQ*gY&ZmtMs#)%@`ZK@pvy~z5`F(fAN2nguWN!{N+sg%?7Fth>VA2kci|6)N% zXB5Tw$dPx7gW1L5J>mRb&|1LmL`6%b94Z*T#YZGNeSCQJ%=k?TsXGP_9fBeohw|Eb z<75B%&iw2+0fK!$GZiV28%MQOnt5sgPgr7I)n(;`cMNIk#wko`u~qI_PSdu8m81QW z^el^o`#zlGy)HwH^6c%8(xD~NM;f6=?pJ8CoH?6 z`qYI6!d>+_T2WZQ&1sfQB6lD{25gK5q)mxGj-_ANY(=|o1AG|h#`{>{^o6<9M~E~O+-() zAyj7ZP+Odz0>e*E^X%3r$!Q_30X@jo<(jnM8L@ausxF;;>qf+(dt)|G1}55+&?}&n zlk^rY(~uqqNp=XOCW@+lugVtvQH1PaA~kC{lh#pK6GD=yi_sOJMT&57hr|G?xJ6wRPi z^$vPvb+zr^oCRPD$wrboFf=j(_Ih;N z`GDXgISagp^XI4fp6KOi)3MQZLp?%1$byv194UDGvUel3mPdpmL2iWDmW4V-u!DXs zN%4SY)P~;;Mj#X!qcbKA<*Mq2e=Qz%F)T#obMj>eQ#KQt1jVFX0Y;4Ufx)J%nq*rq6DKF9yAB8bT*?t_ z(!@0L$hGigE@s7;f#R3gpKI++6}Z{i*jmp+|B?Gnh*1~06Ij;zHVQD>__=j4e2Eps zt6iRK@eN~GL-;+Jg0;eXh`}QAs@L$^{OR!lk9cwV4K+?azP{fhe?HxK8Lw7gnq>0rB=#I>=Hdue zbC&ZRNEv5ybr%$~e(eJ$Hbpsrz~(YXPNDQ^K~{0LtBELOvws{WmNI6q zRC`+dyYRJl)hrM3k?m8~>jD`Ou|X%`$Hy>Zcy;He2P&`-!-ko$5t3BX9jH(JhoS&yHOn37-ral&A6$wfq z>omV?%WxK-Btg~}28V)|a#^L4H9LL^GK#jvs-h%UE|v(5zxL*_1QXyN$}Z?AZO#m zOLC=rzlm@A>z7VVa>ulsH~142bW=`FPOJRX>R({^4Z&9z*wi0cygokN=h_x**HNw$N7 zBa6bwJ@9M#46$v-_=lv|gU1c>UJSmM2^D5Zj>R|s|MxG3&IP8%5Em_tdvAmZ{Almi L)p)kc;?n;IxkQrd literal 0 HcmV?d00001 diff --git a/docs/cephlab.svg b/docs/cephlab.svg new file mode 100644 index 0000000000..d1b446d496 --- /dev/null +++ b/docs/cephlab.svg @@ -0,0 +1,3 @@ + + +
[cephlab.svg image content (SVG markup not reproduced); recoverable text labels: "laptop or workstation", "paddles", "pulpito", "teuthology dispatcher", "teuthology scheduler", "package builder", "test nodes" ("smithi1".."smithiN", "mira1".."miraN"), "job supervisors", "beanstalkd work queue"; fallback text: "Viewer does not support full SVG 1.1"]
\ No newline at end of file diff --git a/docs/commands/list.rst b/docs/commands/list.rst new file mode 100644 index 0000000000..0ac9437ea4 --- /dev/null +++ b/docs/commands/list.rst @@ -0,0 +1,9 @@ +Command line interface (CLI) +============================ + +Help output of the available command line tools for teuthology. + +.. toctree:: + :glob: + + * diff --git a/docs/commands/teuthology-describe.rst b/docs/commands/teuthology-describe.rst new file mode 100644 index 0000000000..fb1f95c3a2 --- /dev/null +++ b/docs/commands/teuthology-describe.rst @@ -0,0 +1,4 @@ +teuthology-describe +=================== + +.. program-output:: teuthology-describe --help diff --git a/docs/commands/teuthology-dispatcher.rst b/docs/commands/teuthology-dispatcher.rst new file mode 100644 index 0000000000..3fa5166a11 --- /dev/null +++ b/docs/commands/teuthology-dispatcher.rst @@ -0,0 +1,9 @@ +teuthology-dispatcher +===================== + +.. program-output:: teuthology-dispatcher --help + +trouble-shooting notes: +======================= + +- Github unreachable kills dispatcher - The dispatcher might be killed when github becomes unreachable, e.g., https://tracker.ceph.com/issues/54366 \ No newline at end of file diff --git a/docs/commands/teuthology-kill.rst b/docs/commands/teuthology-kill.rst new file mode 100644 index 0000000000..6bc084165e --- /dev/null +++ b/docs/commands/teuthology-kill.rst @@ -0,0 +1,4 @@ +teuthology-kill +=============== + +.. program-output:: teuthology-kill --help diff --git a/docs/commands/teuthology-lock.rst b/docs/commands/teuthology-lock.rst new file mode 100644 index 0000000000..5123175f0d --- /dev/null +++ b/docs/commands/teuthology-lock.rst @@ -0,0 +1,4 @@ +teuthology-lock +=============== + +.. program-output:: teuthology-lock --help diff --git a/docs/commands/teuthology-ls.rst b/docs/commands/teuthology-ls.rst new file mode 100644 index 0000000000..856f561f7d --- /dev/null +++ b/docs/commands/teuthology-ls.rst @@ -0,0 +1,4 @@ +teuthology-ls +============= + +.. program-output:: teuthology-ls --help diff --git a/docs/commands/teuthology-nuke.rst b/docs/commands/teuthology-nuke.rst new file mode 100644 index 0000000000..77ec0b89ea --- /dev/null +++ b/docs/commands/teuthology-nuke.rst @@ -0,0 +1,4 @@ +teuthology-nuke +=============== + +.. program-output:: teuthology-nuke --help diff --git a/docs/commands/teuthology-openstack.rst b/docs/commands/teuthology-openstack.rst new file mode 100644 index 0000000000..501fbfe801 --- /dev/null +++ b/docs/commands/teuthology-openstack.rst @@ -0,0 +1,4 @@ +teuthology-openstack +==================== + +.. program-output:: teuthology-openstack --help diff --git a/docs/commands/teuthology-prune-logs.rst b/docs/commands/teuthology-prune-logs.rst new file mode 100644 index 0000000000..c534d5739d --- /dev/null +++ b/docs/commands/teuthology-prune-logs.rst @@ -0,0 +1,4 @@ +teuthology-prune-logs +===================== + +.. program-output:: teuthology-prune-logs --help diff --git a/docs/commands/teuthology-queue.rst b/docs/commands/teuthology-queue.rst new file mode 100644 index 0000000000..3f8c22283a --- /dev/null +++ b/docs/commands/teuthology-queue.rst @@ -0,0 +1,4 @@ +teuthology-queue +================ + +.. program-output:: teuthology-queue --help diff --git a/docs/commands/teuthology-reimage.rst b/docs/commands/teuthology-reimage.rst new file mode 100644 index 0000000000..eb085af791 --- /dev/null +++ b/docs/commands/teuthology-reimage.rst @@ -0,0 +1,4 @@ +teuthology-reimage +================== + +.. 
program-output:: teuthology-reimage --help diff --git a/docs/commands/teuthology-report.rst b/docs/commands/teuthology-report.rst new file mode 100644 index 0000000000..bdd3c49cce --- /dev/null +++ b/docs/commands/teuthology-report.rst @@ -0,0 +1,4 @@ +teuthology-report +================= + +.. program-output:: teuthology-report --help diff --git a/docs/commands/teuthology-results.rst b/docs/commands/teuthology-results.rst new file mode 100644 index 0000000000..22c3eee8f4 --- /dev/null +++ b/docs/commands/teuthology-results.rst @@ -0,0 +1,4 @@ +teuthology-results +================== + +.. program-output:: teuthology-results --help diff --git a/docs/commands/teuthology-schedule.rst b/docs/commands/teuthology-schedule.rst new file mode 100644 index 0000000000..3c03c3f575 --- /dev/null +++ b/docs/commands/teuthology-schedule.rst @@ -0,0 +1,4 @@ +teuthology-schedule +=================== + +.. program-output:: teuthology-schedule --help diff --git a/docs/commands/teuthology-suite.rst b/docs/commands/teuthology-suite.rst new file mode 100644 index 0000000000..85c63eed79 --- /dev/null +++ b/docs/commands/teuthology-suite.rst @@ -0,0 +1,4 @@ +teuthology-suite +================ + +.. program-output:: teuthology-suite --help diff --git a/docs/commands/teuthology-update-inventory.rst b/docs/commands/teuthology-update-inventory.rst new file mode 100644 index 0000000000..dc4e216baf --- /dev/null +++ b/docs/commands/teuthology-update-inventory.rst @@ -0,0 +1,4 @@ +teuthology-update-inventory +=========================== + +.. program-output:: teuthology-update-inventory --help diff --git a/docs/commands/teuthology-updatekeys.rst b/docs/commands/teuthology-updatekeys.rst new file mode 100644 index 0000000000..c0525aecc5 --- /dev/null +++ b/docs/commands/teuthology-updatekeys.rst @@ -0,0 +1,4 @@ +teuthology-updatekeys +===================== + +.. program-output:: teuthology-updatekeys --help diff --git a/docs/commands/teuthology-wait.rst b/docs/commands/teuthology-wait.rst new file mode 100644 index 0000000000..072b87e70d --- /dev/null +++ b/docs/commands/teuthology-wait.rst @@ -0,0 +1,4 @@ +teuthology-wait +===================== + +.. program-output:: teuthology-wait --help diff --git a/docs/commands/teuthology-worker.rst b/docs/commands/teuthology-worker.rst new file mode 100644 index 0000000000..c0096092c9 --- /dev/null +++ b/docs/commands/teuthology-worker.rst @@ -0,0 +1,4 @@ +teuthology-worker +================= + +.. program-output:: teuthology-worker --help diff --git a/docs/commands/teuthology.rst b/docs/commands/teuthology.rst new file mode 100644 index 0000000000..0b7fdf2abb --- /dev/null +++ b/docs/commands/teuthology.rst @@ -0,0 +1,4 @@ +teuthology +========== + +.. program-output:: teuthology --help diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..bce967b1ee --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# +# teuthology documentation build configuration file, created by +# sphinx-quickstart on Thu Aug 7 12:30:36 2014. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosectionlabel', + 'sphinxcontrib.programoutput', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = 'index' + +# General information about the project. +project = u'teuthology' +copyright = u'2014, Inktank Storage, Inc.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1.0' +# The full version, including alpha/beta/rc tags. +release = '0.1.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'ceph' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'teuthologydoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'teuthology.tex', u'teuthology Documentation', + u'Inktank Storage, Inc.', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + ('index', 'teuthology', u'teuthology Documentation', + [u'Inktank Storage, Inc.'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'teuthology', u'teuthology Documentation', + u'Inktank Storage, Inc.', 'teuthology', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False diff --git a/docs/detailed_test_config.rst b/docs/detailed_test_config.rst new file mode 100644 index 0000000000..f97334b9f0 --- /dev/null +++ b/docs/detailed_test_config.rst @@ -0,0 +1,315 @@ +.. _detailed_test_config: + +=========================== +Detailed Test Configuration +=========================== + +Test configuration +================== + +An integration test run takes three items of configuration: + +- ``targets``: what hosts to run on; this is a dictionary mapping + hosts to ssh host keys, like: + "username@hostname.example.com: ssh-rsa long_hostkey_here" +- ``roles``: how to use the hosts; this is a list of lists, where each + entry lists all the roles to be run on a single host. For example, a + single entry might say ``[mon.1, osd.1]``. +- ``tasks``: how to set up the cluster and what tests to run on it; + see below for examples + +The format for this configuration is `YAML `__, a +structured data format that is still human-readable and editable. + +For example, a full config for a test run that sets up a three-machine +cluster, mounts Ceph via ``ceph-fuse``, and leaves you at an interactive +Python prompt for manual exploration (and enabling you to SSH in to +the nodes & use the live cluster ad hoc), might look like this:: + + roles: + - [mon.0, mds.0, osd.0] + - [mon.1, osd.1] + - [mon.2, client.0] + targets: + ubuntu@host07.example.com: ssh-rsa host07_ssh_key + ubuntu@host08.example.com: ssh-rsa host08_ssh_key + ubuntu@host09.example.com: ssh-rsa host09_ssh_key + tasks: + - install: + - ceph: + - ceph-fuse: [client.0] + - interactive: + repo: git://git.ceph.com/ceph.git + +The number of entries under ``roles`` and ``targets`` must match. + +Note the colon after every task name in the ``tasks`` section. Also note the +dashes before each task. This is the YAML syntax for an ordered list and +specifies the order in which tasks are executed. + +The ``install`` task needs to precede all other tasks. + +The listed targets need resolvable hostnames. If you do not have a DNS server +running, you can add entries to ``/etc/hosts``. You also need to be able to SSH +in to the listed targets without passphrases, and the remote user needs to have +passwordless `sudo` access. Note that the ssh keys at the end of the +``targets`` entries are the public ssh keys for the hosts. 
These are +located in /etc/ssh/ssh_host_rsa_key.pub + +If you saved the above file as ``example.yaml``, you could run +teuthology on it like this:: + + ./virtualenv/bin/teuthology example.yaml + +It is possible to configure installation so that specifying targets and host +keys can be omitted. Teuthology is run with the ``--lock`` option which locks +the targets based on ``roles`` in YAML. Teuthology grabs machines from a pool of +available test machines; but since most times machines are busy, you might have +to wait until they are free or else command fails due to lack of available +machines. To avoid this you can specify ``--block`` with ``--lock`` which will +make teuthology retry until it finds and locks required machines. + +You could also pass the ``-v`` option for more verbose execution. See +``teuthology --help`` for more options. + + +Multiple config files +--------------------- + +You can pass multiple files as arguments to teuthology. Each one +will be read as a config file, and their contents will be merged. This +allows you to share definitions of what a "simple 3 node cluster" +is. The source tree comes with ``roles/3-simple.yaml``, so we could +skip the ``roles`` section in the above ``example.yaml`` and then +run:: + + ./virtualenv/bin/teuthology roles/3-simple.yaml example.yaml + + +Reserving target machines +------------------------- + +Teuthology automatically locks nodes for you if you specify the +``--lock`` option. Without this option, you must specify machines to +run on in a ``targets.yaml`` file, and lock them using +teuthology-lock. + +Note that the default owner of a machine is of the form: USER@HOST where USER +is the user who issued the lock command and host is the machine on which the +lock command was run. + +You can override this with the ``--owner`` option when running +teuthology or teuthology-lock. + +With ``teuthology-lock`` you can also add a description, so you can +remember which tests you were running. This can be done when +locking or unlocking machines, or as a separate action with the +``--update`` option. To lock 3 machines and set a description, run:: + + ./virtualenv/bin/teuthology-lock --lock-many 3 --desc 'test foo' + +If machines become unusable for some reason, you can mark them down:: + + ./virtualenv/bin/teuthology-lock --update --status down machine1 machine2 + +To see the status of all machines, use the ``--list`` option. This can +be restricted to particular machines as well:: + + ./virtualenv/bin/teuthology-lock --list machine1 machine2 + + +Choosing machines for a job +--------------------------- + +It is possible to run jobs against machines of one or more ``machine_type`` +values. It is also possible to tell ``teuthology`` to only select those +machines which match the following criteria specified in the job's YAML: + +* ``os_type`` (e.g. 'rhel', 'ubuntu') +* ``os_version`` (e.g. '7.0', '14.04') +* ``arch`` (e.g. 'x86_64') + + +Tasks +===== + +A task is a Python module in the ``teuthology.task`` package, with a +callable named ``task``. It gets the following arguments: + +- ``ctx``: a context that is available through the lifetime of the + test run, and has useful attributes such as ``cluster``, letting the + task access the remote hosts. Tasks can also store their internal + state here. (TODO beware of namespace collisions.) +- ``config``: the data structure after the colon in the config file, + e.g. for the above ``ceph-fuse`` example, it would be a list like + ``["client.0"]``. 
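+
+For illustration, a minimal task module might look like the following sketch
+(hypothetical; the module name ``hello`` and the exact attributes used on
+``ctx`` are assumptions based on the description above, not an existing task)::
+
+    import logging
+
+    log = logging.getLogger(__name__)
+
+    def task(ctx, config):
+        """
+        Log the task's config and the remotes in the cluster.
+        """
+        config = config or {}
+        log.info('hello task config: %s', config)
+        # ctx.cluster.remotes maps each remote host to the roles it carries
+        for remote, roles in ctx.cluster.remotes.items():
+            log.info('%s has roles %s', remote.name, roles)
+
+Such a module would be referenced from a job's ``tasks`` section by its module
+name, e.g. ``- hello:``, in the same way as the tasks shown in the examples
+above.
+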
+ +Tasks can be simple functions, called once in the order they are +listed in ``tasks``. But sometimes it makes sense for a task to be +able to clean up after itself: for example, unmounting the filesystem +after a test run. A task callable that returns a Python `context +manager +`__ +will have the manager added to a stack, and the stack will be unwound +at the end of the run. This means the cleanup actions are run in +reverse order, both on success and failure. A nice way of writing +context managers is the ``contextlib.contextmanager`` decorator; look +for that string in the existing tasks to see examples, and note where +they use ``yield``. + +Further details on some of the more complex tasks such as install or workunit +can be obtained via python help. For example:: + + >>> import teuthology.task.workunit + >>> help(teuthology.task.workunit) + +displays a page of more documentation and more concrete examples. + +Some of the more important / commonly used tasks include: + +* ``ansible``: Run the ansible task. +* ``install``: by default, the install task goes to gitbuilder and installs the + results of the latest build. You can, however, add additional parameters to + the test configuration to cause it to install any branch, SHA, archive or + URL. The following are valid parameters. + + - ``branch``: specify a branch (firefly, giant...) + + - ``flavor``: specify a flavor (next, unstable...). Flavors can be thought of + as subsets of branches. Sometimes (unstable, for example) they may have a + predefined meaning. + + - ``project``: specify a project (ceph, samba...) + + - ``sha1``: install the build with this sha1 value. + + - ``tag``: specify a tag/identifying text for this build (v47.2, v48.1...) + +* ``ceph``: Bring up Ceph + +* ``overrides``: override behavior. Typically, this includes sub-tasks being + overridden. Overrides technically is not a task (there is no 'def task' in + an overrides.py file), but from a user's standpoint can be described as + behaving like one. + Sub-tasks can nest further information. For example, overrides + of install tasks are project specific, so the following section of a yaml + file would cause all ceph installations to default to using the jewel + branch:: + + overrides: + install: + ceph: + branch: jewel + +* ``workunit``: workunits are a way of grouping tasks and behavior on targets. +* ``sequential``: group the sub-tasks into a unit where the sub-tasks run + sequentially as listed. +* ``parallel``: group the sub-tasks into a unit where the sub-tasks all run in + parallel. + +Sequential and parallel tasks can be nested. Tasks run sequentially unless +specified otherwise. + +The above list is a very incomplete description of the tasks available on +teuthology. The teuthology/task subdirectory contains the teuthology-specific +python files that implement tasks. + +Extra tasks used by teuthology can be found in ceph-qa-suite/tasks. These +tasks are not needed for teuthology to run, but do test specific independent +features. A user who wants to define a test for a new feature can implement +new tasks in this directory. + +Many of these tasks are used to run python scripts that are defined in the +ceph/ceph-qa-suite. + +If machines were locked as part of the run (with the --lock switch), +teuthology normally leaves them locked when there is any task failure +for investigation of the machine state. When developing new teuthology +tasks, sometimes this behavior is not useful. 
The ``unlock_on_failure`` +global option can be set to true to make the unlocking happen unconditionally. + +Troubleshooting +=============== + +Postmortem Debugging +-------------------- + +After completion of a test, the ``archive`` subdirectory is archived under +the corresponding ``remote`` subdirectory. We can disable this behavior +using the top-level configuration, like:: + + archive-on-error: true + +If ``archive-on-error`` is ``true``, the ``archive`` subdirectory is +archived only for failed tests. + +If the size of the archived file exceeds 128MB, the file will be compressed +using GZip. This threshold can be configured using the top-level option +named ``log-compress-min-size``, like:: + + log-compress-min-size: 256GB + +Other size unit postfixes are also supported, +see `humanfriendly document `__ +for more details. + +Situ Debugging +-------------- +Sometimes when a bug triggers, instead of automatic cleanup, you want +to explore the system as is. Adding a top-level:: + + interactive-on-error: true + +as a config file for teuthology will make that possible. With that +option, any *task* that fails will have the ``interactive`` task +called after it. This means that before any cleanup happens, you get a +chance to inspect the system -- both through Teuthology and via extra +SSH connections -- and the cleanup completes only when you choose. +Just exit the interactive Python session to continue the cleanup. + +You can enable interactive-on-error with the ``teuthology`` command option +``--interactive-on-error`` + +Interactive task facilities +=========================== + +The ``interactive`` task presents a prompt for you to interact with the +teuthology configuration. The ``ctx`` variable is available to explore, +and a ``pprint.PrettyPrinter().pprint`` object is added for convenience as +``pp``, so you can do things like pp(dict-of-interest) to see a formatted +view of the dict. + +This is also useful to pause the execution of the test between two tasks, +either to perform ad hoc operations, or to examine the state of the cluster. +Hit ``control-D`` to continue when done. + +You need to nest ``interactive`` underneath ``tasks`` in your config. You +can have has many ``interactive`` tasks as needed in your task list. + +An example:: + + tasks: + - ceph: + - interactive: + +Test Sandbox Directory +====================== + +Teuthology currently places most test files and mount points in a +sandbox directory, defaulting to ``/home/$USER/cephtest``. To change +the location of the sandbox directory, the following option can be +specified in ``$HOME/.teuthology.yaml``:: + + test_path: + +Shaman options +============== + +Shaman is a helper class which could be used to build the uri for specified +packages based the 'shaman_host': 'shaman.ceph.com'. + +Options:: + + use_shaman: True # Enable to use Shaman, False as default + shaman: + force_noarch: True # Force to use "noarch" to build the uri diff --git a/docs/docker-compose/README.md b/docs/docker-compose/README.md new file mode 100644 index 0000000000..e69d1bf7f1 --- /dev/null +++ b/docs/docker-compose/README.md @@ -0,0 +1,93 @@ +# Teuthology Development Environment Instruction + +The purpose of this guide is to help developers set +up a development environment for Teuthology. We will be using +Docker to set up all the containers for +Postgres, Paddles, Pulpito, Beanstalk, and Teuthology. 
+ +Currently, it's possible to execute against two classes of test nodes: + +* Using containerized test nodes + * Advantage: No need for a lab at all! + * Disadvantage: Cannot run all Ceph tests; best for exercising the framework itself +* Using nodes from an existing lab (e.g. the Sepia lab) + * Advantage: Can run all Ceph tests + * Disadvantage: Requires lab access + + +Additionally, there are two modes of execution: +* One-shot (the default): Containers start up, schedule and run the `teuthology:no-ceph` suite, and shut down. Success or failure is indicated by the `start.sh` exit code. +* Wait: Containers start up, and `teuthology-dispatcher` is started, but no jobs are scheduled. Runs until the user presses Ctrl-C or `docker-compose down` is run. + +The teuthology container will be built with code from the repository clone that's currently in use. + +## Prerequisites + +### Installing and Running Docker + +For Docker installation see: +https://docs.docker.com/get-docker/ + +### Using Containerized Nodes + +There's nothing special to do; see the Running Tests section below. + +### Using an Existing Lab + +This document assumes you have access to the lab that you intend to use, and that you're already familiar with its VPN and SSH infrastructure. + +Depending on your local operating system, it may be necessary to connect to the VPN before starting Docker. + +#### Using your SSH private key + +In your local shell, simply: +```bash +export SSH_PRIVKEY_PATH=$HOME/.ssh/id_rsa +``` +The teuthology container will write it to a file at runtime. + +#### Reserving Machines in the Lab + +Taking the Sepia lab as an example once again, most users will want to do something like: + +```bash +ssh teuthology.front.sepia.ceph.com +~/teuthology/virtualenv/bin/teuthology-lock \ + --lock-many 1 \ + --machine-type smithi \ + --desc "teuthology dev testing" +``` + +When you are done, don't forget to unlock! + +#### Using Lab Machines + +Once you have your machines locked, you need to provide a list of their hostnames and their machine type: + +```bash +export TESTNODES="smithi999.front.sepia.ceph.com,smithi123.front.sepia.ceph.com" +export MACHINE_TYPE="smithi" +``` + +If the lab uses a "secrets" or "inventory" repository for [ceph-cm-ansible](https://github.com/ceph/ceph-cm-ansible), you'll need to provide a URL for that. In Sepia: +```bash +export ANSIBLE_INVENTORY_REPO="https://github.com/ceph/ceph-sepia-secrets" +``` +This repo will be cloned locally, using your existing `git` configuration, and copied into the teuthology container at build time. + +## Running Tests + +To run the default `teuthology:no-ceph` suite in one-shot mode: +```bash +./start.sh +``` + +To run in wait mode: +```bash +TEUTHOLOGY_WAIT=1 ./start.sh +``` + +To schedule tests in wait mode: +```bash +docker exec docker-compose_teuthology_1 /venv/bin/teuthology-suite ... 
+``` \ No newline at end of file diff --git a/docs/docker-compose/db/01-init.sh b/docs/docker-compose/db/01-init.sh new file mode 100755 index 0000000000..b9e5adc2ff --- /dev/null +++ b/docs/docker-compose/db/01-init.sh @@ -0,0 +1,8 @@ +set -e +export PGPASSWORD=$POSTGRES_PASSWORD; +psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + CREATE USER $APP_DB_USER WITH PASSWORD '$APP_DB_PASS'; + CREATE DATABASE $APP_DB_NAME; + GRANT ALL PRIVILEGES ON DATABASE $APP_DB_NAME TO $APP_DB_USER; + \connect $APP_DB_NAME $APP_DB_USER +EOSQL \ No newline at end of file diff --git a/docs/docker-compose/docker-compose.yml b/docs/docker-compose/docker-compose.yml new file mode 100644 index 0000000000..4276fc456c --- /dev/null +++ b/docs/docker-compose/docker-compose.yml @@ -0,0 +1,92 @@ +version: '3.8' + +services: + postgres: + image: postgres:latest + healthcheck: + test: [ "CMD", "pg_isready", "-q", "-d", "paddles", "-U", "admin" ] + timeout: 5s + interval: 10s + retries: 2 + environment: + - POSTGRES_USER=root + - POSTGRES_PASSWORD=password + - APP_DB_USER=admin + - APP_DB_PASS=password + - APP_DB_NAME=paddles + volumes: + - ./db:/docker-entrypoint-initdb.d/ + ports: + - 5432:5432 + paddles: + image: quay.io/ceph-infra/paddles + environment: + PADDLES_SERVER_HOST: 0.0.0.0 + PADDLES_SQLALCHEMY_URL: postgresql+psycopg2://admin:password@postgres:5432/paddles + depends_on: + postgres: + condition: service_healthy + links: + - postgres + healthcheck: + test: ["CMD", "curl", "-f", "http://0.0.0.0:8080"] + timeout: 5s + interval: 30s + retries: 2 + ports: + - 8080:8080 + pulpito: + image: quay.io/ceph-infra/pulpito + environment: + PULPITO_PADDLES_ADDRESS: http://paddles:8080 + depends_on: + paddles: + condition: service_healthy + links: + - paddles + healthcheck: + test: ["CMD", "curl", "-f", "http://0.0.0.0:8081"] + timeout: 5s + interval: 10s + retries: 2 + ports: + - 8081:8081 + beanstalk: + build: ../../beanstalk/alpine + ports: + - "11300:11300" + teuthology: + build: + context: ../../ + dockerfile: ./docs/docker-compose/teuthology/Dockerfile + args: + SSH_PRIVKEY_FILE: $SSH_PRIVKEY_FILE + depends_on: + paddles: + condition: service_healthy + links: + - paddles + - beanstalk + environment: + SSH_PRIVKEY: + SSH_PRIVKEY_FILE: + MACHINE_TYPE: + TESTNODES: + TEUTHOLOGY_WAIT: + TEUTH_BRANCH: + testnode: + build: + context: ./testnode + dockerfile: ./Dockerfile + deploy: + replicas: 3 + depends_on: + paddles: + condition: service_healthy + links: + - paddles + ports: + - "22" + environment: + SSH_PUBKEY: + platform: linux/amd64 diff --git a/docs/docker-compose/start.sh b/docs/docker-compose/start.sh new file mode 100755 index 0000000000..871258c5be --- /dev/null +++ b/docs/docker-compose/start.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -e +export TEUTHOLOGY_BRANCH=${TEUTHOLOGY_BRANCH:-$(git branch --show-current)} +export TEUTH_BRANCH=${TEUTHOLOGY_BRANCH} +if [ -n "$ANSIBLE_INVENTORY_REPO" ]; then + basename=$(basename $ANSIBLE_INVENTORY_REPO | cut -d. -f1) + if [ ! -d "$basename" ]; then + git clone \ + --depth 1 \ + $ANSIBLE_INVENTORY_REPO + fi + mkdir -p teuthology/ansible_inventory + cp -rf $basename/ansible/ teuthology/ansible_inventory + if [ ! -d teuthology/ansible_inventory/hosts ]; then + mv -f teuthology/ansible_inventory/inventory teuthology/ansible_inventory/hosts + fi +fi +# Make the hosts and secrets directories, so that the COPY instruction in the +# Dockerfile does not cause a build failure when not using this feature. 
+mkdir -p teuthology/ansible_inventory/hosts teuthology/ansible_inventory/secrets + +if [ -n "$CUSTOM_CONF" ]; then + cp "$CUSTOM_CONF" teuthology/ +fi + +# Generate an SSH keypair to use if necessary +if [ -z "$SSH_PRIVKEY_PATH" ]; then + SSH_PRIVKEY_PATH=$(mktemp -u /tmp/teuthology-ssh-key-XXXXXX) + ssh-keygen -t rsa -N '' -f $SSH_PRIVKEY_PATH + export SSH_PRIVKEY=$(cat $SSH_PRIVKEY_PATH) + export SSH_PUBKEY=$(cat $SSH_PRIVKEY_PATH.pub) + export SSH_PRIVKEY_FILE=id_rsa +else + export SSH_PRIVKEY=$(cat $SSH_PRIVKEY_PATH) + export SSH_PRIVKEY_FILE=$(basename $SSH_PRIVKEY_PATH | cut -d. -f1) +fi + +if [ -z "$TEUTHOLOGY_WAIT" ]; then + DC_EXIT_FLAG='--abort-on-container-exit --exit-code-from teuthology' + DC_AUTO_DOWN_CMD='docker-compose down' +fi +export TEUTHOLOGY_WAIT + +trap "docker-compose down" SIGINT +docker-compose up \ + --build \ + $DC_EXIT_FLAG +$DC_AUTO_DOWN_CMD diff --git a/docs/docker-compose/testnode/Dockerfile b/docs/docker-compose/testnode/Dockerfile new file mode 100644 index 0000000000..016d32117a --- /dev/null +++ b/docs/docker-compose/testnode/Dockerfile @@ -0,0 +1,26 @@ +FROM ubuntu:focal +ENV DEBIAN_FRONTEND=noninteractive +RUN apt update && \ + apt -y install \ + sudo \ + openssh-server \ + hostname \ + curl \ + python3-pip \ + apache2 \ + nfs-kernel-server && \ + apt clean all +COPY testnode_start.sh / +COPY testnode_stop.sh / +COPY testnode_sudoers /etc/sudoers.d/teuthology +RUN \ + ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N '' && \ + sed -i 's/#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config && \ + mkdir -p /root/.ssh && \ + chmod 700 /root/.ssh && \ + useradd -g sudo ubuntu && \ + mkdir -p /home/ubuntu/.ssh && \ + chmod 700 /home/ubuntu/.ssh && \ + chown -R ubuntu /home/ubuntu +EXPOSE 22 +ENTRYPOINT /testnode_start.sh diff --git a/docs/docker-compose/testnode/testnode_start.sh b/docs/docker-compose/testnode/testnode_start.sh new file mode 100755 index 0000000000..d29c3b6d0a --- /dev/null +++ b/docs/docker-compose/testnode/testnode_start.sh @@ -0,0 +1,13 @@ +#!/usr/bin/bash +set -x +echo "$SSH_PUBKEY" > /root/.ssh/authorized_keys +echo "$SSH_PUBKEY" > /home/ubuntu/.ssh/authorized_keys +chown ubuntu /home/ubuntu/.ssh/authorized_keys +payload="{\"name\": \"$(hostname)\", \"machine_type\": \"testnode\", \"up\": true, \"locked\": false, \"os_type\": \"ubuntu\", \"os_version\": \"20.04\"}" +for i in $(seq 1 5); do + echo "attempt $i" + curl -v -f -d "$payload" http://paddles:8080/nodes/ && break + sleep 1 +done +mkdir -p /run/sshd +exec /usr/sbin/sshd -D diff --git a/docs/docker-compose/testnode/testnode_stop.sh b/docs/docker-compose/testnode/testnode_stop.sh new file mode 100755 index 0000000000..2e1044d807 --- /dev/null +++ b/docs/docker-compose/testnode/testnode_stop.sh @@ -0,0 +1,10 @@ +#!/usr/bin/bash +set -x +hostname=$(hostname) +payload="{\"name\": \"$hostname\", \"machine_type\": \"testnode\", \"up\": false}" +for i in $(seq 1 5); do + echo "attempt $i" + curl -s -f -X PUT -d "$payload" http://paddles:8080/nodes/$hostname/ && break + sleep 1 +done +pkill sshd \ No newline at end of file diff --git a/docs/docker-compose/testnode/testnode_sudoers b/docs/docker-compose/testnode/testnode_sudoers new file mode 100644 index 0000000000..35828ad9be --- /dev/null +++ b/docs/docker-compose/testnode/testnode_sudoers @@ -0,0 +1,4 @@ +%sudo ALL=(ALL) NOPASSWD: ALL +# For ansible pipelining +Defaults !requiretty +Defaults visiblepw diff --git a/docs/docker-compose/teuthology/.teuthology.yaml b/docs/docker-compose/teuthology/.teuthology.yaml new file 
mode 100644 index 0000000000..bac8ec1aaf --- /dev/null +++ b/docs/docker-compose/teuthology/.teuthology.yaml @@ -0,0 +1,9 @@ +queue_host: beanstalk +queue_port: 11300 +lock_server: http://paddles:8080 +results_server: http://paddles:8080 +results_ui_server: http://pulpito:8081/ +teuthology_path: /teuthology +archive_base: /archive_dir +reserve_machines: 0 +lab_domain: '' \ No newline at end of file diff --git a/docs/docker-compose/teuthology/Dockerfile b/docs/docker-compose/teuthology/Dockerfile new file mode 100644 index 0000000000..5587489d17 --- /dev/null +++ b/docs/docker-compose/teuthology/Dockerfile @@ -0,0 +1,43 @@ +FROM ubuntu:latest +ARG SSH_PRIVKEY_FILE=id_ed25519 +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && \ + apt-get install -y \ + git \ + qemu-utils \ + python3-dev \ + libssl-dev \ + ipmitool \ + python3-pip \ + python3-venv \ + vim \ + libev-dev \ + libvirt-dev \ + libffi-dev \ + libyaml-dev \ + lsb-release && \ + apt-get clean all +WORKDIR /teuthology +COPY requirements.txt bootstrap /teuthology/ +RUN \ + cd /teuthology && \ + mkdir ../archive_dir && \ + mkdir log && \ + chmod +x /teuthology/bootstrap && \ + PIP_INSTALL_FLAGS="-r requirements.txt" ./bootstrap +COPY . /teuthology +RUN \ + ./bootstrap +COPY docs/docker-compose/teuthology/containerized_node.yaml /teuthology +COPY docs/docker-compose/teuthology/.teuthology.yaml /root +COPY docs/docker-compose/teuthology/teuthology.sh / +RUN mkdir -p /etc/ansible +COPY docs/docker-compose/teuthology/ansible_inventory/hosts /etc/ansible/ +COPY docs/docker-compose/teuthology/ansible_inventory/secrets /etc/ansible/ +RUN \ + mkdir $HOME/.ssh && \ + touch $HOME/.ssh/${SSH_PRIVKEY_FILE} && \ + chmod 600 $HOME/.ssh/${SSH_PRIVKEY_FILE} && \ + echo "StrictHostKeyChecking=no" > $HOME/.ssh/config && \ + echo "UserKnownHostsFile=/dev/null" >> $HOME/.ssh/config +ENTRYPOINT /teuthology.sh \ No newline at end of file diff --git a/docs/docker-compose/teuthology/containerized_node.yaml b/docs/docker-compose/teuthology/containerized_node.yaml new file mode 100644 index 0000000000..02304886c4 --- /dev/null +++ b/docs/docker-compose/teuthology/containerized_node.yaml @@ -0,0 +1,8 @@ +overrides: + ansible.cephlab: + skip_tags: "timezone,nagios,monitoring-scripts,ssh,hostname,pubkeys,zap,sudoers,kerberos,selinux,lvm,ntp-client,resolvconf,packages,cpan,nfs" + vars: + containerized_node: true + ansible_user: root + cm_user: root + start_rpcbind: false diff --git a/docs/docker-compose/teuthology/teuthology.sh b/docs/docker-compose/teuthology/teuthology.sh new file mode 100755 index 0000000000..0378f93d44 --- /dev/null +++ b/docs/docker-compose/teuthology/teuthology.sh @@ -0,0 +1,46 @@ +#!/usr/bin/bash +set -e +# We don't want -x yet, in case the private key is sensitive +if [ -n "$SSH_PRIVKEY_FILE" ]; then + echo "$SSH_PRIVKEY" > $HOME/.ssh/$SSH_PRIVKEY_FILE +fi +source /teuthology/virtualenv/bin/activate +set -x +if [ -n "$TESTNODES" ]; then + for node in $(echo $TESTNODES | tr , ' '); do + teuthology-update-inventory -m $MACHINE_TYPE $node + done + CUSTOM_CONF=${CUSTOM_CONF:-} +else + CUSTOM_CONF=/teuthology/containerized_node.yaml +fi +export MACHINE_TYPE=${MACHINE_TYPE:-testnode} +if [ -z "$TEUTHOLOGY_WAIT" ]; then + if [ -n "$TEUTH_BRANCH" ]; then + TEUTH_BRANCH_FLAG="--teuthology-branch $TEUTH_BRANCH" + fi + teuthology-suite -v \ + $TEUTH_BRANCH_FLAG \ + --ceph-repo https://github.com/ceph/ceph.git \ + --suite-repo https://github.com/ceph/ceph.git \ + -c main \ + -m $MACHINE_TYPE \ + --limit 1 \ + -n 100 \ + --suite 
teuthology:no-ceph \ + --filter-out "libcephfs,kclient,stream,centos,rhel" \ + -d ubuntu -D 20.04 \ + --suite-branch main \ + --subset 9000/100000 \ + -p 75 \ + --seed 349 \ + --force-priority \ + $CUSTOM_CONF + DISPATCHER_EXIT_FLAG='--exit-on-empty-queue' + teuthology-queue -m $MACHINE_TYPE -s | \ + python3 -c "import sys, json; assert json.loads(sys.stdin.read())['count'] > 0, 'queue is empty!'" +fi +teuthology-dispatcher -v \ + --log-dir /teuthology/log \ + --tube $MACHINE_TYPE \ + $DISPATCHER_EXIT_FLAG diff --git a/docs/downburst_vms.rst b/docs/downburst_vms.rst new file mode 100644 index 0000000000..d649be66b1 --- /dev/null +++ b/docs/downburst_vms.rst @@ -0,0 +1,89 @@ +.. _downburst_vms: + +============= +Downburst VMs +============= + +Teuthology also supports virtual machines via `downburst +`__, which can function like physical +machines but differ in the following ways: + +VPS Hosts: +-------- +The following description is based on the Red Hat lab used by the upstream Ceph +development and quality assurance teams. + +The teuthology database of available machines contains a vpshost field. +For physical machines, this value is null. For virtual machines, this entry +is the name of the physical machine that that virtual machine resides on. + +There are fixed "slots" for virtual machines that appear in the teuthology +database. These slots have a machine type of vps and can be locked like +any other machine. The existence of a vpshost field is how teuthology +knows whether or not a database entry represents a physical or a virtual +machine. + +In order to get the right virtual machine associations, the following needs +to be set in ~/.config/libvirt/libvirt.conf or for some older versions +of libvirt (like ubuntu precise) in ~/.libvirt/libvirt.conf:: + + uri_aliases = [ + 'mira001=qemu+ssh://ubuntu@mira001.front.sepia.ceph.com/system?no_tty=1', + 'mira003=qemu+ssh://ubuntu@mira003.front.sepia.ceph.com/system?no_tty=1', + 'mira004=qemu+ssh://ubuntu@mira004.front.sepia.ceph.com/system?no_tty=1', + 'mira005=qemu+ssh://ubuntu@mira005.front.sepia.ceph.com/system?no_tty=1', + 'mira006=qemu+ssh://ubuntu@mira006.front.sepia.ceph.com/system?no_tty=1', + 'mira007=qemu+ssh://ubuntu@mira007.front.sepia.ceph.com/system?no_tty=1', + 'mira008=qemu+ssh://ubuntu@mira008.front.sepia.ceph.com/system?no_tty=1', + 'mira009=qemu+ssh://ubuntu@mira009.front.sepia.ceph.com/system?no_tty=1', + 'mira010=qemu+ssh://ubuntu@mira010.front.sepia.ceph.com/system?no_tty=1', + 'mira011=qemu+ssh://ubuntu@mira011.front.sepia.ceph.com/system?no_tty=1', + 'mira013=qemu+ssh://ubuntu@mira013.front.sepia.ceph.com/system?no_tty=1', + 'mira014=qemu+ssh://ubuntu@mira014.front.sepia.ceph.com/system?no_tty=1', + 'mira015=qemu+ssh://ubuntu@mira015.front.sepia.ceph.com/system?no_tty=1', + 'mira017=qemu+ssh://ubuntu@mira017.front.sepia.ceph.com/system?no_tty=1', + 'mira018=qemu+ssh://ubuntu@mira018.front.sepia.ceph.com/system?no_tty=1', + 'mira020=qemu+ssh://ubuntu@mira020.front.sepia.ceph.com/system?no_tty=1', + 'mira024=qemu+ssh://ubuntu@mira024.front.sepia.ceph.com/system?no_tty=1', + 'mira029=qemu+ssh://ubuntu@mira029.front.sepia.ceph.com/system?no_tty=1', + 'mira036=qemu+ssh://ubuntu@mira036.front.sepia.ceph.com/system?no_tty=1', + 'mira043=qemu+ssh://ubuntu@mira043.front.sepia.ceph.com/system?no_tty=1', + 'mira044=qemu+ssh://ubuntu@mira044.front.sepia.ceph.com/system?no_tty=1', + 'mira074=qemu+ssh://ubuntu@mira074.front.sepia.ceph.com/system?no_tty=1', + 'mira079=qemu+ssh://ubuntu@mira079.front.sepia.ceph.com/system?no_tty=1', 
+ 'mira081=qemu+ssh://ubuntu@mira081.front.sepia.ceph.com/system?no_tty=1', + 'mira098=qemu+ssh://ubuntu@mira098.front.sepia.ceph.com/system?no_tty=1', + ] + +Downburst: +---------- + +When a virtual machine is locked, downburst is run on that machine to install a +new image. This allows the user to set different virtual OSes to be installed +on the newly created virtual machine. Currently the default virtual machine is +ubuntu (precise). A different vm installation can be set using the +``--os-type`` and ``--os-version`` options in ``teuthology.lock``. + +When a virtual machine is unlocked, downburst destroys the image on the +machine. + +To find the downburst executable, teuthology first checks the PATH environment +variable. If not defined, teuthology next checks for +src/downburst/virtualenv/bin/downburst executables in the user's home +directory, /home/ubuntu, and /home/teuthology. This can all be overridden if +the user specifies a downburst field in the user's .teuthology.yaml file. + +Host Keys: +---------- + +Because teuthology reinstalls a new machine, a new hostkey is generated. After +locking, once a connection is established to the new machine, +``teuthology-lock`` with the ``--list`` or ``--list-targets`` options will +display the new keys. When vps machines are locked using the ``--lock-many`` +option, a message is displayed indicating that ``--list-targets`` should be run +later. + +Assumptions: +------------ + +It is assumed that downburst is on the user's ``$PATH``. diff --git a/docs/fragment_merging.rst b/docs/fragment_merging.rst new file mode 100644 index 0000000000..fd0a7663d0 --- /dev/null +++ b/docs/fragment_merging.rst @@ -0,0 +1,318 @@ +.. _fragment_merging: + +================ +Fragment Merging +================ + +Once the matrix of YAML fragments is contructed by teuthology, the fragments +must be merged together and processed. Up until 2022, this merging process was +static: all of the YAML fragments were joined together in lexicographical order +with duplicate fragment members *deep merged* together (e.g. the "tasks" +array). Now, fragments and entire job specifications can be dynamically changed +or dropped according to Lua scripts embedded in the fragment. + +premerge Scripts +================ + +The first phase of script execution takes place in the *premerge* step. Each +fragment may have its own premerge script which is run before the fragment is +merged. The script is defined as follows:: + + teuthology: + premerge: | + if yaml.os_type == 'ubuntu' then reject() end + +Again, this script will run prior to the YAML fragment merging into the +complete YAML specification for a job. The script has access to the YAML job +description (the ``yaml`` variable) generated so far from the fragments merged +prior to this one (remember: fragments are ordered lexicographically). In the +above case, the ``os_type`` is checked such that the fragment is dropped +(rejected) if the job is configured to run on Ubuntu. Note: this does not +account for a jobs' default os_type which is not yet known; only the +``os_type`` specified by the YAML fragments is usable in these scripts. + +When run in the premerge step, the ``reject`` function causes the fragment to be +dropped from the job: none of its YAML will be merged into the job. The +``accept`` function causes the fragment to be merged. The default action is to +accept the fragment. 
+ +postmerge Scripts +================= + +The second phase of script execution is the *postmerge* step run after all +fragments have been merged. At this point, the YAML specification for the job +is all but complete. Scripts can now make final modifications to the YAML or +reject the job completely causing it to be removed from the list of jobs to be +scheduled. An example postmerge script:: + + teuthology: + postmerge: + - if yaml.os_type == "ubuntu" then reject() end + +This script is the same but has a different effect: after combining all the +YAML fragments for a job, if the os_type is "ubuntu" then the entire job is +dropped (filtered out / rejected). postmerge scripts are also specified as a +list of strings in the ``teuthology.postmerge`` array which may span multiple +fragments. During the postmerge step, all of these strings are concatenated and +then executed as a single script. You may use this to define variables, +functions, or anything else you need. + +Scripts have access to the entire yaml object and may use it to do advanced +checks. It is also possible to programatically change the YAML definition:: + + teuthology: + postmerge: + - | + -- use the lupa "attrgetter" to fetch attrs not items via Lua's indexing + local attr = py_attrgetter + local tasks = py_list() + for i = 1, 3 do + local task = py_dict() + task.exec = py_dict() + task.exec["mon.a"] = py_list() + attr(task.exec["mon.a"]).append("echo "..i) + attr(tasks).append(task) + end + deep_merge(yaml.tasks, tasks) + + +This will be as if the YAML fragment contained:: + + tasks: + - exec: + mon.a: + - echo 1 + - exec: + mon.a: + - echo 2 + - exec: + mon.a: + - echo 3 + +Except the tasks are appended to the end after all fragments have been loaded. +This is opposed to the normal mode of the tasks appending when the fragment is +merged (in lexicographic order). + +API +=== + +Scripts are well sandboxed with access to a small selection of the Lua builtin +libraries. There is also access to some Python/Lupa specific functions which +are prefixed with ``py_``. No I/O or other system functions permitted. + +The Lua builtins available include:: + + assert + error + ipairs + pairs + tonumber + tostring + +Additionally, the Python functions exposed via Lupa include:: + + py_attrgetter = python.as_attrgetter + py_dict = python.builtins.dict + py_list = python.builtins.list + py_tuple = python.builtins.tuple + py_enumerate = python.enumerate + py_iterex = python.iterex + py_itemgetter = python.as_itemgetter + +These are all prefixed with ``py_``. See the `Lupa documentation +`__ for more information. + +Finally, teuthology exposes the following functions for scripts: + +:: + + accept() + +The ``accept`` function stops script execution and causes the fragment to be +merged (premerge script) or the job to be accepted for scheduling (postmerge +script). The default action of a script is to accept. + +:: + + reject() + +The ``reject`` function stops script execution and causes the fragment to be +dropped (premerge script) or the job to be rejected for scheduling (postmerge +script). + + +:: + + deep_merge(a, b) + +The ``deep_merge`` function comes from the teuthology code base. It's used to +merge YAML structures. It's provided for convenience to ease a common operation +on Python (yaml) objects. The function merges ``b`` into ``a``. + + +:: + + log + +The ``log`` Python class (object) allows Lua to leave debugging in the +``teuthology-suite`` log. 
+ +:: + + yaml_load(str) + +This function loads the YAML string and returns it as a Python structure (of +dicts, lists, etc.). + + +Concrete Example +================ + +The +`fs:upgrade:mds_upgrade_sequence `__ +sub-suite tests that the `upgrade sequence for CephFS `__ +is followed when the cluster is managed by cephadm. The most interesting set of YAML in this suite is in ``tasks/``:: + + % + 0-from/ + pacific.yaml + v16.2.4.yaml + 1-volume/ + 0-create.yaml + 1-ranks/ + 1.yaml + 2.yaml + 2-allow_standby_replay/ + yes.yaml + no.yaml + 3-inline + yes.yaml + no.yaml + 4-verify.yaml + 2-client.yaml + 3-upgrade-with-workload.yaml + 4-verify.yaml + +Basically: upgrade the cluster from one of two versions of pacific, create a +volume (fs), possibly turn some knobs in the MDSMap, and verify the upgrade +completes correctly. This works well and is an excellent example of effective +matrix construction for testing. + +The feature we want to test is a `new upgrade procedure +`__ for the MDS. It only requires +"failing" the file systems which removes all running MDS from the MDSMap and +prevents any MDS from "joining" the file system (becoming active). The upgrade +procedure then upgrades the packages, restarts the MDS, then sets the file +system to allow MDS to join (become active). Ideally, we could modify the +matrix this way:: + + % + fail_fs/ + yes.yaml + no.yaml + tasks/ + % + 0-from/ + pacific.yaml + v16.2.4.yaml + 1-volume/ + 0-create.yaml + 1-ranks/ + 1.yaml + 2.yaml + 2-allow_standby_replay/ + yes.yaml + no.yaml + 3-inline + yes.yaml + no.yaml + 4-verify.yaml + 2-client.yaml + 3-upgrade-with-workload.yaml + 4-verify.yaml + +So we just change (or don't change) a single config option in ``fail_fs`` +which turns on that upgrade path:: + + overrides: + ceph: + conf: + mgr: + mgr/orchestrator/fail_fs: true + +The complication however is that this new ``fail_fs`` config option is only +understood by the newest mgr (the ``main`` branch or possibly the latest +pacific or quincy)... and the mons won't let you set a config unknown to exist. +So, we must do a staggered upgrade to test this new upgrade path: the mgr must +be upgraded, a config option set to change how MDS upgrades are performed, and +then the cluster may continue upgrading. + +**Here's the problem**: the mgr only knows how to do a staggered upgrade +beginning with v16.2.10. So, we can't even upgrade from v16.2.4 to test this +new upgrade path. + +(One might be tempted to remove v16.2.4 as an upgrade path in +QA but we must continue testing this due to major (breaking) changes in the +MDSMap across v16.2.4 and v16.2.5. It would not be acceptable to remove it.) + +To get around this awkward problem, we can use the new scripting of fragment +merging to control whether this ``mgr/orchestrator/fail_fs`` config option is +set. If we are upgrading from v16.2.4, then drop any jobs in the matrix that +also want to test this new MDS upgrade procedure. So we modify the yaml +fragments as:: + + fail_fs/no.yaml: + teuthology: + variables: + fail_fs: false + overrides: + ceph: + conf: + mgr: + mgr/orchestrator/fail_fs: false + + fail_fs/yes.yaml: + teuthology: + variables: + fail_fs: true + overrides: + ceph: + conf: + mgr: + mgr/orchestrator/fail_fs: true + + tasks/0-from/v16.2.4.yaml: + teuthology: + postmerge: + - if yaml.teuthology.variables.fail_fs then reject() end + ... 
+ + +We have set a variable (for ease of programming) in a +``teuthology['variables']`` dictionary which indicates whether the merged YAML +includes the ``fail_fs`` feature or not. Then, if we're upgrading from v16.2.4 +and that variable is true, drop that set of jobs in the matrix. This +effectively prevents any testing of this upgrade procedure when the cluster is +upgraded from v16.2.4. + +Note: the final merged QA code also includes a YAML fragment to perform a +staggered upgrade of the ``ceph-mgr``. This YAML fragment is dropped using a +premerge script if we're not testing ``fail_fs``; there is no reason to do a +staggered upgrade if we don't need to. See the code if you'd like to see how +that works! + + +Why Lua +======= + +Lua is a small, extensible, and easily sandboxed scripting environment. Python +is difficult to sandbox correctly and its restrictions make it difficult to +embed in YAML (like indentation for code blocks). + + +Python-Lua +========== + +`Lupa `__ is the most recent derivative of the +"lunatic" python project. It allows for trivial cross-talk between Python and +Lua worlds. diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..a218ae781a --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,25 @@ +Content Index +============= + +.. toctree:: + :maxdepth: 2 + + README.rst + intro_testers.rst + fragment_merging.rst + siteconfig.rst + detailed_test_config.rst + openstack_backend.rst + libcloud_backend.rst + downburst_vms.rst + INSTALL.rst + LAB_SETUP.rst + commands/list.rst + ChangeLog.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/intro_testers.rst b/docs/intro_testers.rst new file mode 100644 index 0000000000..2abf974744 --- /dev/null +++ b/docs/intro_testers.rst @@ -0,0 +1,81 @@ +.. _intro_testers: + +======================== +Introduction for Testers +======================== + +This document is aimed at providing an introduction to running existing test suites. + +We assume here that you have access to an operational test lab; if not, ask +your local admin for access! + +If you're here to test upstream Ceph, start `here +`__. + + +Terminology +=========== + +In the abstract, each set of tests is defined by a `suite`. All of our suites +live in the `ceph` git repository in the `qa/suites/ directory +`__ . +Each subdirectory in `suites` is a suite; they may also have "sub-suites" which +may aid in scheduling, for example, tests for a specific feature. + +In concrete terms, a `run` is what is created by assembling the contents of a +`suite` into a number of `jobs`. A `job` is created by assembling a number of +`fragments` (also known as `facets`) together. Each `fragment` is in `YAML +`__ format. + +Each `job` definition contains a list of `tasks` to execute, along with +`roles`. `Roles` tell `teuthology` how many nodes to use for each `job` along +with what functions each node will perform. + +To go into more depth regarding suite design, see the `README +`__. + +One example of this is the `smoke +`__ suite. + + +Scheduling +========== +Most testing happens by scheduling `runs`. The way we do that is using the +`teuthology-suite` command. + +To get a preview of what `teuthology-suite` might do, try:: + + teuthology-suite -v -m mira --ceph-repo http://github.com/ceph/ceph.git -c main --suite-repo http://github.com/ceph/ceph.git -s smoke --dry-run + +The `-m mira` specifies `mira` as the machine type. Machine types are dependent +on the specific lab in use. 
The `--ceph-repo http://github.com/ceph/ceph.git` +specifies from which git repository to pull `-c main`. Similarly, +`--suite-repo` is specifying where to find the QA branch. The default for +`--ceph-repo` and `--suite-repo` is `http://github.com/ceph/ceph-ci.git` which +is usually what you will want. For `main`, you must always use +`http://github.com/ceph/ceph.git` as it does not exist on the ceph-ci +repository. + +Assuming a build is available, that should pretend to schedule several jobs. If +it complains about missing packages, try swapping `main` with `jewel` or one +of the other Ceph stable branches. + +To see even more detail, swap `-v` with `-vv`. It will print out each job +definition in full. To limit the number of jobs scheduled, you may want to use +the `--limit`, `--filter`, or `--filter-out` flags. + +To actually schedule, drop `--dry-run` and optionally use the `--email` flag to +get an email when the test run completes. + +`teuthology-suite` also prints out a link to the run in `pulpito +`__ that will display the current status of +each job. The Sepia lab's pulpito instance is `here +`__. + +There may be times when, after scheduling a run containing a large number of +jobs, that you want to reschedule only those jobs which have failed or died for +some other reason. For that use-case, `teuthology-suite` has a `--rerun`/`-r` +flag, and an optional `--rerun-statuses`/`-R` flag. An example of its usage +is:: + + teuthology-suite -m smithi -c wip-pdonnell-testing-20170718 --rerun pdonnell-2017-07-19_19:04:52-multimds-wip-pdonnell-testing-20170718-testing-basic-smithi -R dead --dry-run diff --git a/docs/laptop/README.md b/docs/laptop/README.md new file mode 100644 index 0000000000..4c3b9a428d --- /dev/null +++ b/docs/laptop/README.md @@ -0,0 +1,434 @@ +# Teuthology Development Environment Guide + +This is a brief guide how to setup teuthology development environment +on your laptop (desktop if you wish). Though everything in this guide +can be implemented as one handy script, some more details how things +work can be helpful to document. + +## Introduction + +Teuthology consists from the following components: + +teuthology - the core framework which can run a job, +the config file which describes test environment +and task list to execute. + +- paddles - a database and the api +- pulpito - web gui for paddles +- beanstalkd - the job queue + +The teuthology core includes following main tools: +- teuthology-suite +- teuthology-schedule +- teuthology-worker +- teuthology (formerly teuthology-run). +- teuthology-lock - allows to lock and provision nodes + separately from run. + +## Docker + +Though paddles and pulpito can be run as services using supervisord +it is often useful to have them isolated in a container. +There can be used any of available tools, but here are example for +bare docker. 
+ +### Start docker and add shared network + +Add your user to docker group and start the service: + +```bash +sudo usermod -aG docker $USER +sudo service docker start +``` + +Create paddles network for container interaction: + +```bash +docker network create paddles +``` + +### Run postgres + +Start postgres containers in order to use paddles: + +```bash +mkdir $HOME/.teuthology/postgres +docker run -d -p 5432:5432 --network paddles --name paddles-postgres \ + -e POSTGRES_PASSWORD=secret \ + -e POSTGRES_USER=paddles \ + -e POSTGRES_DB=paddles \ + -e PGDATA=/var/lib/postgresql/data/pgdata \ + -v $HOME/.teuthology/postgres:/var/lib/postgresql/data postgres +``` + +### Run paddles + +Checkout paddles and build the image: + +```bash +cd ~/paddles && docker build . --file Dockerfile --tag paddles +``` + +Run the container with previously created network: + +```bash +docker run -d --network paddles --name api -p 80:8080 \ + -e PADDLES_SERVER_HOST=0.0.0.0 \ + -e PADDLES_SQLALCHEMY_URL=postgresql+psycopg2://paddles:secret@paddles-postgres/paddles \ + paddles +``` + +### Run pulpito + +Checkout pulpito and build the image: + +```bash +cd ~/pulpito && docker build . --file Dockerfile --tag pulpito +``` + +Run the container: + +```bash +docker run -d --network paddles --name web -p 8081:8081 -e PULPITO_PADDLES_ADDRESS=http://api:8080 pulpito +``` + +NOTE. Restart pulpito container: + +```bash +docker kill web ; docker container rm web +``` + +NOTE. You can check all listening ports by: + +```bash +sudo lsof -i -P -n | grep LISTEN +``` + +NOTE. You can check database connection using: + +```bash +psql -h localhost -U paddles -l +``` + +## Setup Libvirt for Downburst + +Add libvirt host nodes: + +```sql +insert into nodes (name, machine_type, is_vm, locked, up) values ('localhost', 'libvirt', false, true, true); +insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values ('target-00.local', 'vps', true, false, false, '52:54:00:00:00:00', (select id from nodes where name='localhost')); +insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values ('target-01.local', 'vps', true, false, false, '52:54:00:00:00:01', (select id from nodes where name='localhost')); +insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values ('target-02.local', 'vps', true, false, false, '52:54:00:00:00:02', (select id from nodes where name='localhost')); +insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values ('target-03.local', 'vps', true, false, false, '52:54:00:00:00:03', (select id from nodes where name='localhost')); +``` +or just use the following command: + +```bash +psql -h localhost -U paddles -d paddles < docs/laptop/targets.sql +``` + +Add libvirt config file so downburst able to use 'localhost' node to connect to: + +```bash +cat > ~/.config/libvirt/libvirt.conf << END +uri_aliases = [ + 'localhost=qemu:///system?no_tty=1', +] + +END +``` + +Add your user to wheel group and allow to wheel group to passwordless access libvirt: + +```bash + +sudo usermod -a -G wheel $USER + +``` + +Allow users in wheel group to manage the libvirt daemon without authentication: + +```bash + +sudo tee /etc/polkit-1/rules.d/50-libvirt.rules << END +polkit.addRule(function(action, subject) { + if (action.id == "org.libvirt.unix.manage" && + subject.isInGroup("wheel")) { + return polkit.Result.YES; + } +}); + +END + +``` + +(Taken from: https://octetz.com/docs/2020/2020-05-06-linux-hypervisor-setup/) + +Make 
sure libvirtd is running: + +```bash +sudo service libvirtd start +``` + +NOTE. You can check you are able to access libvirt without password: + +```bash + +virsh -c qemu:///system list + +``` + +Make sure libvirt front network exists, it can be defined as NAT and +include dhcp records for the target nodes: + +```xml + + front + + + + + + + + + + + + + + +``` +for example: + +```bash +virsh -c qemu:///system net-define docs/laptop/front.xml + +``` + +(for details, look https://jamielinux.com/docs/libvirt-networking-handbook/appendix/dhcp-host-entries.html) + +Add corresponding records to your /etc/hosts: + +```txt +192.168.123.100 target-00 target-00.local +192.168.123.101 target-01 target-01.local +192.168.123.102 target-02 target-02.local +192.168.123.103 target-03 target-03.local +``` +you can take it from corresponding file: +``` +sudo tee -a /etc/hosts < docs/laptop/hosts +``` + +Make sure the front network is up: + +```bash +sudo virsh net-start front +``` + +NOTE. The 'default' volume pool should be up and running before trying downburst or teuthology-lock. + +```bash +> sudo virsh pool-list --all + Name State Autostart +------------------------------- + default active no +``` + + +## Setup teuthology virtual environment + + +Checkout the teuthology core repo and run the bootstrap script: +```bash +git clone https://github.com/ceph/teuthology ~/teuthology +cd ~/teuthology && ./bootstrap +. virtualenv/bin/activate +``` + +By default the `./bootstrap` script is installing teuthology in development mode +to the `virtualenv` directory. + +Create teuthology config file `~/.teuthology.yaml`: + +```bash +cat > ~/.teuthology.yaml << END +# replace $HOME with whatever appropriate to your needs +# teuthology-lock +lab_domain: local +lock_server: http://localhost:80 +default_machine_type: vps +# teuthology-run +results_server: http://localhost:80 +# we do not need reserve_machines on localhost +reserve_machines: 0 +# point to your teuthology +teuthology_path: $HOME/teuthology +# beanstalkd +queue_host: localhost +queue_port: 11300 +# if you want make and test patches to ceph-cm-ansible +# ceph_cm_ansible_git_url: $HOME/ceph-cm-ansible +# customize kvm guests parameter +downburst: + path: $HOME/downburst/virtualenv/bin/downburst + # define discover_url if you need your custom downburst image server + # discover_url: http://localhost:8181/images/ibs/ + machine: + cpus: 2 + disk: 12G + ram: 2G + volumes: + size: 8G + count: 4 +# add the next two if you do not use shaman +check_package_signatures: false +suite_verify_ceph_hash: false +END + +``` + +List locks: + +```bash +> teuthology-lock --brief --all +localhost up locked None "None" +target-00.local up unlocked None "None" +target-01.local up unlocked None "None" +target-02.local up unlocked None "None" +target-03.local up unlocked None "None" + +``` +Where the `localhost` is special purpose node where libvirt instance is running +and where the target nodes will be created. + +Export the downburst discover url environment variable for your own image storage if required: + +```bash +# cloud image location +export DOWNBURST_DISCOVER_URL=http://localhost:8181/images +``` + +NOTE. The step above is optional and is required if you are going to use custom image +location for the downburst, which is useful though when you want minimize traffic to +you computer. Refer [Create own discovery location](#create-own-discovery-location) +to know more how to create your private image storage. 
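+
+NOTE. Before locking nodes you may want to check that teuthology can reach the
+lock server configured above. Assuming paddles exposes its node list at
+`/nodes/` (as set up in the container sections earlier), a quick sanity check
+could be:
+
+```bash
+# should print a JSON list that includes localhost and the target-* nodes
+curl -s http://localhost:80/nodes/ | python3 -m json.tool | head -n 20
+```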
+ +Try to lock nodes now: + +```bash +teuthology-lock -v --lock target-00 -m vps --os-type opensuse --os-version 15.2 +teuthology-lock -v --lock-many 1 -m vps --os-type ubuntu --os-version 16.04 +``` + +To initialize all targets you need to use `--lock` instead `--lock-many` +for the first time for each target. + +(Note. It can be probably changed, but this is how it is recommended +in teuthology adding nodes guide for the lab setup) + +For further usage nodes should be unlocked with `--unlock` option. + +### Run beanstalkd + +For openSUSE there is no beanstalkd package as for Ubuntu, so it is needed to add corresponding repo: + +```bash +zypper addrepo https://download.opensuse.org/repositories/filesystems:/ceph:/teuthology/openSUSE_Leap_15.2/x86_64/ teuthology && zypper ref +``` + +Install beanstalkd package and run the service: + +```bash +sudo zypper in beanstalkd +sudo service beanstalkd start +``` + +### Run worker + +Create archive and worker log directories and run the worker polling required tube. + +```bash +TEUTH_HOME=$HOME/.teuthology +mkdir -p $TEUTH_HOME/www/logs/jobs +mkdir -p $TEUTH_HOME/www/logs/workers + +teuthology-worker -v --tube vps --archive-dir $TEUTH_HOME/www/logs/jobs --log-dir $TEUTH_HOME/www/logs/workers +``` + +Schedule a dummy job: +```bash +teuthology-suite -v --ceph-repo https://github.com/ceph/ceph --suite-repo https://github.com/ceph/ceph --ceph octopus --suite dummy -d centos -D 8.2 --sha1 35adebe94e8b0a17e7b56379a8bf24e5f7b8ced4 --limit 1 -m vps -t refs/pull/1548/merge +``` + +## Downburst + +Checkout downburst to your home, bootstrap virtualenv and enable it: +```bash +git clone https://github.com/ceph/downburst ~/downburst +pushd ~/downburst && ./bootstrap +``` + +### Create own discovery location + +(This step is optional, use it if you want to use private image location.) + +Create images directory, and download some images: + +```bash +DATE=$(date +%Y%m%d) +mkdir -p $HOME/.teuthology/www/images +wget http://download.opensuse.org/distribution/leap/15.2/appliances/openSUSE-Leap-15.2-JeOS.x86_64-OpenStack-Cloud.qcow2 -O $HOME/.teuthology/www/images/opensuse-15.2-$DATE-cloudimg-amd64.img +wget http://download.opensuse.org/distribution/leap/15.1/jeos/openSUSE-Leap-15.1-JeOS.x86_64-OpenStack-Cloud.qcow2 -O $HOME/.teuthology/www/images/opensuse-15.1-$DATE-cloudimg-amd64.img +wget http://download.opensuse.org/tumbleweed/appliances/openSUSE-Tumbleweed-JeOS.x86_64-OpenStack-Cloud.qcow2 -O $HOME/.teuthology/www/images/opensuse-tumbleweed-20200810-cloudimg-amd64.img +```` + +Create sha512 for the image: + +```bash +cd $HOME/.teuthology/www/images +sha512sum opensuse-15.2-$DATE-cloudimg-amd64.img | cut -d' ' -f1 > opensuse-15.2-$DATE-cloudimg-amd64.img.sha512 +sha512sum opensuse-15.1-$DATE-cloudimg-amd64.img | cut -d' ' -f1 > opensuse-15.1-$DATE-cloudimg-amd64.img.sha512 +sha512sum opensuse-tumbleweed-20200810-cloudimg-amd64.img | cut -d' ' -f1 > opensuse-tumbleweed-20200810-cloudimg-amd64.img.sha512 +``` + +run webserver localy: + +```bash +(cd $TEUTH_HOME/www && python -m SimpleHTTPServer 8181) +``` + +or + +```bash +(cd $TEUTH_HOME/www && python3 -m http.server 8181) +``` + +```bash +export DOWNBURST_DISCOVER_URL=http://localhost:8181/images/ +``` + +Make sure libvirtd is running and default network is up: + +```bash +sudo service libvirtd start +sudo virsh net-start default +``` + +### Try out node creation + + +List available distro/version and available images. 
+ +```bash +downburst list +``` + +Start a VM for example: + +```bash +downburst -v create --distro opensuse --user-data doc/examples/no-password.opensuse.user.yaml opensuse +sudo virsh net-dhcp-leases default | grep opensuse + +``` diff --git a/docs/laptop/default-pool.xml b/docs/laptop/default-pool.xml new file mode 100644 index 0000000000..106740d726 --- /dev/null +++ b/docs/laptop/default-pool.xml @@ -0,0 +1,7 @@ + + default + + /var/lib/libvirt/images/default + + + diff --git a/docs/laptop/front.xml b/docs/laptop/front.xml new file mode 100644 index 0000000000..67887a0d6d --- /dev/null +++ b/docs/laptop/front.xml @@ -0,0 +1,15 @@ + + front + + + + + + + + + + + + + diff --git a/docs/laptop/hosts b/docs/laptop/hosts new file mode 100644 index 0000000000..d15cad80f8 --- /dev/null +++ b/docs/laptop/hosts @@ -0,0 +1,7 @@ + +# teuthology hosts used as downburst vps targets +192.168.123.100 target-00 target-00.local +192.168.123.101 target-01 target-01.local +192.168.123.102 target-02 target-02.local +192.168.123.103 target-03 target-03.local + diff --git a/docs/laptop/ssh_config b/docs/laptop/ssh_config new file mode 100644 index 0000000000..9b847b9243 --- /dev/null +++ b/docs/laptop/ssh_config @@ -0,0 +1,6 @@ +Host target-* + User ubuntu + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR + diff --git a/docs/laptop/targets.sql b/docs/laptop/targets.sql new file mode 100644 index 0000000000..8e95176298 --- /dev/null +++ b/docs/laptop/targets.sql @@ -0,0 +1,9 @@ +begin transaction; +insert into nodes (name, machine_type, is_vm, locked, up) values ('localhost', 'libvirt', false, true, true); +insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values +('target-00.local', 'vps', true, false, false, '52:54:00:00:00:00', (select id from nodes where name='localhost')), +('target-01.local', 'vps', true, false, false, '52:54:00:00:00:01', (select id from nodes where name='localhost')), +('target-02.local', 'vps', true, false, false, '52:54:00:00:00:02', (select id from nodes where name='localhost')), +('target-03.local', 'vps', true, false, false, '52:54:00:00:00:03', (select id from nodes where name='localhost')); +commit transaction + diff --git a/docs/laptop/teuthology.yaml b/docs/laptop/teuthology.yaml new file mode 100644 index 0000000000..f29e0a78d6 --- /dev/null +++ b/docs/laptop/teuthology.yaml @@ -0,0 +1,30 @@ +# replace $HOME with whatever appropriate to your needs +# teuthology-lock +lab_domain: local +lock_server: http://localhost:80 +default_machine_type: vps +# teuthology-run +results_server: http://localhost:80 +# we do not need reserve_machines on localhost +reserve_machines: 0 +# point to your teuthology +teuthology_path: $HOME/teuthology +# beanstalkd +queue_host: localhost +queue_port: 11300 +# if you want make and test patches to ceph-cm-ansible +# ceph_cm_ansible_git_url: $HOME/ceph-cm-ansible +# customize kvm guests parameter +downburst: + path: $HOME/downburst/virtualenv/bin/downburst + discover_url: http://localhost:8181/images/ibs/ + machine: + cpus: 2 + disk: 12G + ram: 2G + volumes: + size: 8G + count: 4 +check_package_signatures: false +suite_verify_ceph_hash: false + diff --git a/docs/libcloud_backend.rst b/docs/libcloud_backend.rst new file mode 100644 index 0000000000..84bdf7d9aa --- /dev/null +++ b/docs/libcloud_backend.rst @@ -0,0 +1,43 @@ +.. _libcloud-backend: + +LibCloud backend +================ +This is an *experimental* provisioning backend that eventually intends to support several libcloud drivers. 
At this time only the OpenStack driver is supported. + +Prerequisites +------------- +* An account with an OpenStack provider that supports Nova and Cinder +* A DNS server supporting `RFC 2136 `_. We use `bind `_ and `this ansible role `_ to help configure ours. +* An `nsupdate-web `_ instance configured to update DNS records. We use `an ansible role `_ for this as well. +* Configuration in `teuthology.yaml` for this backend itself (see :ref:`libcloud_config`) and `nsupdate-web` +* You will also need to choose a maximum number of nodes to be running at once, and create records in your paddles database for each one - making sure to set `is_vm` to `True` for each. + +.. _libcloud_config: + +Configuration +------------- +An example configuration using OVH as an OpenStack provider:: + + libcloud: + providers: + ovh: # This string is the 'machine type' value you will use when locking these nodes + driver: openstack + driver_args: # driver args are passed directly to the libcloud driver + username: 'my_ovh_username' + password: 'my_ovh_password' + ex_force_auth_url: 'https://auth.cloud.ovh.net/v2.0/tokens' + ex_force_auth_version: '2.0_password' + ex_tenant_name: 'my_tenant_name' + ex_force_service_region: 'my_region' + +Why nsupdate-web? +----------------- +While we could have supported directly calling `nsupdate `_, we chose not to. There are a few reasons for this: + +* To avoid piling on yet another feature of teuthology that could be left up to a separate service +* To avoid teuthology users having to request, obtain and safeguard the private key that nsupdate requires to function +* Because we use one subdomain for all of Sepia's test nodes, we had to enable dynamic DNS for that whole zone (this is a limitation of bind). However, we do not want users to be able to push DNS updates for the entire zone. Instead, we gave nsupdate-web the ability to accept or reject requests based on whether the hostname matches a configurable regular expression. The private key itself is not shared with non-admin users. + +Bugs +---- +At this time, only OVH has been tested as a provider. PRs are welcome to support more! diff --git a/docs/openstack_backend.rst b/docs/openstack_backend.rst new file mode 100644 index 0000000000..36f8fdf2ba --- /dev/null +++ b/docs/openstack_backend.rst @@ -0,0 +1,214 @@ +.. _openstack-backend: + +OpenStack backend +================= + +The ``teuthology-openstack`` command is a wrapper around +``teuthology-suite`` that transparently creates the teuthology cluster +using OpenStack virtual machines. + +Prerequisites +------------- + +An OpenStack tenant with access to the nova and cinder API. If the +cinder API is not available, some jobs won't run because they expect +volumes attached to each instance. + +Setup OpenStack at OVH +---------------------- + +Each instance has a public IP by default. 
+ +* `create an account `_ +* get $HOME/openrc.sh from `the horizon dashboard `_ + +Setup +----- + +* Get and configure teuthology:: + + $ git clone http://github.com/ceph/teuthology + $ cd teuthology ; ./bootstrap install + $ source virtualenv/bin/activate + +* Setup the teuthology node:: + + $ teuthology-openstack --key-filename myself.pem --key-name myself --setup + +Get OpenStack credentials and test it +------------------------------------- + +* follow the `OpenStack API Quick Start `_ +* source $HOME/openrc.sh +* verify the OpenStack client works:: + + $ nova list + +----+------------+--------+------------+-------------+-------------------------+ + | ID | Name | Status | Task State | Power State | Networks | + +----+------------+--------+------------+-------------+-------------------------+ + +----+------------+--------+------------+-------------+-------------------------+ +* create a passwordless ssh public key with:: + + $ openstack keypair create myself > myself.pem + +-------------+-------------------------------------------------+ + | Field | Value | + +-------------+-------------------------------------------------+ + | fingerprint | e0:a3:ab:5f:01:54:5c:1d:19:40:d9:62:b4:b3:a1:0b | + | name | myself | + | user_id | 5cf9fa21b2e9406b9c4108c42aec6262 | + +-------------+-------------------------------------------------+ + $ chmod 600 myself.pem + +Usage +----- + +* Run the dummy suite. It does nothing useful but shows all works as + expected. Note that the first time it is run, it can take a long + time (from a few minutes to half an hour or so) because it downloads + and uploads a cloud image to the OpenStack provider. :: + + $ teuthology-openstack --key-filename myself.pem --key-name myself --suite dummy + Job scheduled with name ubuntu-2015-07-24_09:03:29-dummy-main---basic-openstack and ID 1 + 2015-07-24 09:03:30,520.520 INFO:teuthology.suite:ceph sha1: dedda6245ce8db8828fdf2d1a2bfe6163f1216a1 + 2015-07-24 09:03:31,620.620 INFO:teuthology.suite:ceph version: v9.0.2-829.gdedda62 + 2015-07-24 09:03:31,620.620 INFO:teuthology.suite:teuthology branch: main + 2015-07-24 09:03:32,196.196 INFO:teuthology.suite:ceph-qa-suite branch: main + 2015-07-24 09:03:32,197.197 INFO:teuthology.repo_utils:Fetching from upstream into /home/ubuntu/src/ceph-qa-suite_main + 2015-07-24 09:03:33,096.096 INFO:teuthology.repo_utils:Resetting repo at /home/ubuntu/src/ceph-qa-suite_main to branch main + 2015-07-24 09:03:33,157.157 INFO:teuthology.suite:Suite dummy in /home/ubuntu/src/ceph-qa-suite_main/suites/dummy generated 1 jobs (not yet filtered) + 2015-07-24 09:03:33,158.158 INFO:teuthology.suite:Scheduling dummy/{all/nop.yaml} + 2015-07-24 09:03:34,045.045 INFO:teuthology.suite:Suite dummy in /home/ubuntu/src/ceph-qa-suite_main/suites/dummy scheduled 1 jobs. + 2015-07-24 09:03:34,046.046 INFO:teuthology.suite:Suite dummy in /home/ubuntu/src/ceph-qa-suite_main/suites/dummy -- 0 jobs were filtered out. + + 2015-07-24 11:03:34,104.104 INFO:teuthology.openstack: + web interface: http://167.114.242.13:8081/ + ssh access : ssh ubuntu@167.114.242.13 # logs in /usr/share/nginx/html + +* Visit the web interface (the URL is displayed at the end of the + teuthology-openstack output) to monitor the progress of the suite. + +* The virtual machine running the suite will persist for forensic + analysis purposes. 
To destroy it run:: + + $ teuthology-openstack --key-filename myself.pem --key-name myself --teardown + +* The test results can be uploaded to a publicly accessible location + with the ``--upload`` flag:: + + $ teuthology-openstack --key-filename myself.pem --key-name myself \ + --suite dummy --upload + + +Troubleshooting +--------------- + +Debian Jessie users may face the following error:: + + NameError: name 'PROTOCOL_SSLv3' is not defined + +The `workaround +`_ +suggesting to replace ``PROTOCOL_SSLv3`` with ``PROTOCOL_SSLv23`` in +the ssl.py has been reported to work. + +Running the OpenStack backend integration tests +----------------------------------------------- + +The easiest way to run the integration tests is to first run a dummy suite:: + + $ teuthology-openstack --key-name myself --suite dummy + ... + ssh access : ssh ubuntu@167.114.242.13 + +This will create a virtual machine suitable for the integration +test. Login wih the ssh access displayed at the end of the +``teuthology-openstack`` command and run the following:: + + $ pkill -f teuthology-worker + $ cd teuthology ; pip install "tox>=1.9" + $ tox -v -e openstack-integration + integration/openstack-integration.py::TestSuite::test_suite_noop PASSED + ... + ========= 9 passed in 2545.51 seconds ======== + $ tox -v -e openstack + integration/test_openstack.py::TestTeuthologyOpenStack::test_create PASSED + ... + ========= 1 passed in 204.35 seconds ========= + +Defining instances flavor and volumes +------------------------------------- + +Each target (i.e. a virtual machine or instance in the OpenStack +parlance) created by the OpenStack backend are exactly the same. By +default they have at least 8GB RAM, 20GB disk, 1 cpus and no disk +attached. It is equivalent to having the following in the +`~/.teuthology.yaml `_ file:: + + openstack: + ... + machine: + disk: 20 # GB + ram: 8000 # MB + cpus: 1 + volumes: + count: 0 + size: 1 # GB + +If a job needs more RAM or disk etc. the following can be included in +an existing facet (yaml file in the teuthology parlance):: + + openstack: + - machine: + disk: 100 # GB + volumes: + count: 4 + size: 10 # GB + +Teuthology interprets this as the minimimum requirements, on top of +the defaults found in the ``~/.teuthology.yaml`` file and the job will +be given instances with at least 100GB root disk, 8GB RAM, 1 cpus and +four 10GB volumes attached. The highest value wins: if the job claims +to need 4GB RAM and the defaults are 8GB RAM, the targets will all +have 8GB RAM. + +Note the dash before the ``machine`` key: the ``openstack`` element is +an array with one value. If the dash is missing, it is a dictionary instead. +It matters because there can be multiple entries per job such as:: + + openstack: + - machine: + disk: 40 # GB + ram: 8000 # MB + + openstack: + - machine: + ram: 32000 # MB + + openstack: + - volumes: # attached to each instance + count: 3 + size: 200 # GB + +When a job is composed with these, teuthology aggregates them as:: + + openstack: + - machine: + disk: 40 # GB + ram: 8000 # MB + - machine: + ram: 32000 # MB + - volumes: # attached to each instance + count: 3 + size: 200 # GB + +i.e. all entries are grouped in a list in the same fashion ``tasks`` are. +The resource requirement is the maximum of the resources found in each +element (including the default values). 
In the example above it is equivalent to:: + + openstack: + machine: + disk: 40 # GB + ram: 32000 # MB + volumes: # attached to each instance + count: 3 + size: 200 # GB diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000..c2b295e9f6 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,3 @@ +sphinx == 4.4.0 +sphinxcontrib-programoutput +mock == 2.0.0 diff --git a/docs/siteconfig.rst b/docs/siteconfig.rst new file mode 100644 index 0000000000..5bc98e98d6 --- /dev/null +++ b/docs/siteconfig.rst @@ -0,0 +1,248 @@ +.. _site_config: + +Site and Client Configuration +============================= + +Teuthology requires several configuration options to be set, and provides many other optional ones. They are looked for in ``~/.teuthology.yaml`` if it exists, or ``/etc/teuthology.yaml`` if it doesn't. + +Here is a sample configuration with many of the options set and documented:: + + # lab_domain: the domain name to append to all short hostnames + lab_domain: example.com + + # The root directory to use for storage of all scheduled job logs and + # other data. + archive_base: /home/teuthworker/archive + + # The default machine_type value to use when not specified. Currently + # only used by teuthology-suite. + default_machine_type: awesomebox + + # Control how many machines need to be free in the cluster. 0 means + # Teuthology can use the entire cluster. + reserve_machines: 5 + + # The host and port to use for the beanstalkd queue. This is required + # for scheduled jobs. + queue_host: localhost + queue_port: 11300 + + # The URL of the lock server (paddles). This is required for scheduled + # jobs. + lock_server: http://paddles.example.com:8080/ + + # The URL of the results server (paddles). + results_server: http://paddles.example.com:8080/ + + # This URL of the results UI server (pulpito). You must of course use + # paddles for pulpito to be useful. + results_ui_server: http://pulpito.example.com/ + + # Email address that will receive job results summaries. + results_email: ceph-qa@example.com + + # Email address that job results summaries originate from + results_sending_email: teuthology@example.com + + # How long (in seconds) teuthology-results should wait for jobs to finish + # before considering them 'hung' + results_timeout: 43200 + + # Gitbuilder archive that stores e.g. ceph packages + gitbuilder_host: gitbuilder.example.com + + # URL for 'gitserver' helper web application + # see http://github.com/ceph/gitserver + githelper_base_url: http://git.ceph.com:8080 + + # Verify the packages signatures + check_package_signatures: true + + # Where all git repos are considered to reside. + ceph_git_base_url: https://github.com/ceph/ + + # Where the ceph git repo is considered to reside. + ceph_git_url: https://github.com/ceph/ceph.git + + # Where the ceph-qa-suite git repo is considered to reside. + ceph_qa_suite_git_url: https://github.com/ceph/ceph-qa-suite.git + + # Where teuthology and ceph-qa-suite repos should be stored locally + src_base_path: /home/foo/src + + # Where teuthology path is located: do not clone if present + #teuthology_path: . + + # Whether or not teuthology-suite, when scheduling, should update + # itself from git. This is disabled by default. + automated_scheduling: false + + # How often, in seconds, teuthology-worker should poll its child job + # processes + watchdog_interval: 120 + + # How long a scheduled job should be allowed to run, in seconds, before + # it is killed by the worker process. 
+ max_job_time: 259200 + + # The template from which the URL of the repository containing packages + # is built. + # + # {host} is 'gitbuilder_host' from .teuthology.yaml + # {proj} is the value of 'project' from the job yaml file or 'ceph' + # {flavor} is the value of 'flavor' from the job yaml file or 'default' + # {uri} is ref/tag if 'tag' is set in the job yaml file + # or ref/branch if 'branch' is set in the job yaml file + # or sha1/sha1 if 'sha1' is set in the job yaml file + # or ref/main + # {pkg_type} is either 'deb' or 'rpm' depending on the host on which the + # packages are to be installed + # {dist} If lsb_release -si is Fedora the value is: + # Fedora 20 => fc20 + # Fedora 21 => fc21 + # etc. + # If lsb_release -si is CentOS or RedHatEnterpriseServer it is + # CentOS 6.5 => centos6 + # CentOS 7.0 => centos7 + # CentOS 7.1 => centos7 + # RedHatEnterpriseServer 6.4 => centos6 + # RedHatEnterpriseServer 7.0 => centos7 + # RedHatEnterpriseServer 7.1 => centos7 + # etc. + # Everything else is whatever lsb_release -sc returns + # Ubuntu 12.04 => precise + # Ubuntu 14.04 => trusty + # Debian GNU/Linux 7.0 => wheezy + # Debian GNU/Linux 8.0 => jessie + # etc. + # {arch} is the output of the 'arch' command on the host on which + # the packages are to be installed + # i386 + # x86_64 + # armv7l + # etc. + baseurl_template: http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri} + + # If True, teuthology-suite verifies that a package matching the + # desired ceph branch exists in the gitbuilder. If False, no + # verification is done and teuthology-suite assumes the packages + # are either not necessary to run the task or they are created on + # demand. + suite_verify_ceph_hash: True + + # If true, teuthology-suite will schedule jobs even if the required + # packages are not built. + suite_allow_missing_packages: False + + # The rsync destination to upload the job results, when --upload is + # is provided to teuthology-suite. + # + archive_upload: ubuntu@teuthology-logs.public.ceph.com:./ + + # The path to the SSH private key for rsync to upload to archive_upload + # + archive_upload_key: None + + # The public facing URL of the archive_upload location + # + archive_upload_url: http://teuthology-logs.public.ceph.com/ + + # The OpenStack backend configuration, a dictionary interpreted as follows + # + openstack: + + # The teuthology-openstack command will clone teuthology with + # this command for the purpose of deploying teuthology from + # scratch and run workers listening on the openstack tube + # + clone: git clone http://github.com/ceph/teuthology + + # The path to the user-data file used when creating a target. It can have + # the {os_type} and {os_version} placeholders which are replaced with + # the value of --os-type and --os-version. No instance of a give {os_type} + # and {os_version} combination can be created unless such a file exists. + # + user-data: teuthology/openstack/openstack-{os_type}-{os_version}-user-data.txt + + # The IP number of the instance running the teuthology cluster. It will + # be used to build user facing URLs and should usually be the floating IP + # associated with the instance running the pulpito server. + # + ip: 8.4.8.4 + + # OpenStack has predefined machine sizes (called flavors) + # For a given job requiring N machines, the following example select + # the smallest flavor that satisfies these requirements. 
For instance + # If there are three flavors + # + # F1 (10GB disk, 2000MB RAM, 1CPU) + # F2 (100GB disk, 7000MB RAM, 1CPU) + # F3 (50GB disk, 7000MB RAM, 1CPU) + # + # and machine: { disk: 40, ram: 7000, cpus: 1 }, F3 will be chosen. + # F1 does not have enough RAM (2000 instead of the 7000 minimum) and + # although F2 satisfies all the requirements, it is larger than F3 + # (100GB instead of 50GB) and presumably more expensive. + # + # This configuration applies to all instances created for teuthology jobs + # that do not redefine these values. + # + machine: + + # The minimum root disk size of the flavor, in GB + # + disk: 20 # GB + + # The minimum RAM size of the flavor, in MB + # + ram: 8000 # MB + + # The minimum number of vCPUS of the flavor + # + cpus: 1 + + # The volumes attached to each instance. In the following example, + # three volumes of 10 GB will be created for each instance and + # will show as /dev/vdb, /dev/vdc and /dev/vdd + # + # This configuration applies to all instances created for teuthology jobs + # that do not redefine these values. + # + volumes: + + # The number of volumes + # + count: 3 + + # The size of each volume, in GB + # + size: 10 # GB + + # The host running a [PCP](http://pcp.io/) manager + pcp_host: http://pcp.front.sepia.ceph.com:44323/ + + # Settings for http://www.conserver.com/ + use_conserver: true + conserver_master: conserver.front.sepia.ceph.com + conserver_port: 3109 + + # Settings for [nsupdate-web](https://github.com/zmc/nsupdate-web) + # Used by the [libcloud](https://libcloud.apache.org/) backend + nsupdate_url: http://nsupdate.front.sepia.ceph.com/update + + # Settings for https://fogproject.org/ + fog: + endpoint: http://fog.example.com/fog + api_token: your_api_token + user_token: your_user_token + machine_types: ['mira', 'smithi'] + + # FOG provisioner is default and switching to Pelgas + # should be made explicitly + pelagos: + endpoint: http://head.ses.suse.de:5000/ + machine_types: ['type1', 'type2', 'type3'] + + # Do not allow more than that many jobs in a single run by default. + # To disable this check use 0. 
+ job_threshold: 500 diff --git a/examples/3node_ceph.yaml b/examples/3node_ceph.yaml new file mode 100644 index 0000000000..16544f3410 --- /dev/null +++ b/examples/3node_ceph.yaml @@ -0,0 +1,15 @@ +roles: +- [mon.0, mds.0, osd.0] +- [mon.1, osd.1] +- [mon.2, client.0] + +tasks: +- install: +- ceph: +- kclient: [client.0] +- interactive: + +targets: + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa diff --git a/examples/3node_rgw.yaml b/examples/3node_rgw.yaml new file mode 100644 index 0000000000..e0a42e2ffd --- /dev/null +++ b/examples/3node_rgw.yaml @@ -0,0 +1,24 @@ +interactive-on-error: true +overrides: + ceph: + branch: main + fs: xfs +roles: +- - mon.a + - mon.c + - osd.0 +- - mon.b + - mds.a + - osd.1 +- - client.0 +tasks: +- install: +- ceph: null +- rgw: + - client.0 +- interactive: + +targets: + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa diff --git a/examples/parallel_example.yaml b/examples/parallel_example.yaml new file mode 100644 index 0000000000..d1491358b2 --- /dev/null +++ b/examples/parallel_example.yaml @@ -0,0 +1,20 @@ +interactive-on-error: true +overrides: +roles: +- - test0 + - test1 +- - test0 + - test1 +- - test0 +tasks: +- install: +- parallel_example: + - test0 + - test1 + +targets: + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa + ubuntu@: ssh-rsa + + diff --git a/hammer.sh b/hammer.sh new file mode 100755 index 0000000000..9f206f2e98 --- /dev/null +++ b/hammer.sh @@ -0,0 +1,32 @@ +#!/bin/sh -ex +# +# simple script to repeat a test until it fails +# + +if [ $1 = "-a" ]; then + shift + job=$1 + log="--archive $job.out" +else + job=$1 + log="" +fi + +test -e $1 + +title() { + echo '\[\033]0;hammer '$job' '$N' passes\007\]' +} + +N=0 +title +[ -n "$log" ] && [ -d $job.out ] && rm -rf $job.out +while teuthology $log $job $2 $3 $4 +do + date + N=$(($N+1)) + echo "$job: $N passes" + [ -n "$log" ] && rm -rf $job.out + title +done +echo "$job: $N passes, then failure." 
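+#
+# Example invocations (the job yaml name is illustrative):
+#   ./hammer.sh my-job.yaml              # repeat until the first failure
+#   ./hammer.sh -a my-job.yaml           # also keep the failing run's archive in my-job.yaml.out
+#   ./hammer.sh my-job.yaml extra.yaml   # additional yaml files are passed through to teuthology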
diff --git a/openstack-delegate.sh b/openstack-delegate.sh new file mode 100755 index 0000000000..01b7e63a26 --- /dev/null +++ b/openstack-delegate.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +trap "rm -f teuthology-integration.pem ; openstack keypair delete teuthology-integration ; openstack server delete teuthology-integration" EXIT + +openstack keypair create teuthology-integration > teuthology-integration.pem +chmod 600 teuthology-integration.pem +teuthology-openstack --name teuthology-integration --key-filename teuthology-integration.pem --key-name teuthology-integration --suite teuthology/integration --wait --teardown --upload diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..ece6fe5f51 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,10 @@ +[build-system] +build-backend = "setuptools.build_meta" +requires = [ + "setuptools>=45", + "wheel", + "setuptools_scm>=6.2", +] + +[tool.setuptools_scm] +version_scheme = "python-simplified-semver" \ No newline at end of file diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..60d435c8b7 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +norecursedirs = .git build virtualenv teuthology.egg-info .tox */integration task/tests +log_cli=true +log_level=NOTSET +addopts = -p no:cacheprovider diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..85f09be3fc --- /dev/null +++ b/requirements.txt @@ -0,0 +1,384 @@ +# +# This file is autogenerated by pip-compile with python 3.6 +# To update, run: +# +# pip-compile --extra=test pyproject.toml +# +ansible==2.10.7 + # via teuthology (pyproject.toml) +ansible-base==2.10.17 + # via ansible +apache-libcloud==3.3.1 + # via teuthology (pyproject.toml) +appdirs==1.4.4 + # via openstacksdk +argparse==1.4.0 + # via teuthology (pyproject.toml) +attrs==21.2.0 + # via + # cmd2 + # pytest +backports.ssl-match-hostname==3.7.0.1 + # via teuthology (pyproject.toml) +bcrypt==3.2.0 + # via paramiko +beanstalkc3==0.4.0 + # via teuthology (pyproject.toml) +boto==2.49.0 + # via teuthology (pyproject.toml) +boto3==1.21.46 + # via teuthology (pyproject.toml) +botocore==1.24.46 + # via + # boto3 + # s3transfer +certifi==2021.5.30 + # via + # requests + # sentry-sdk +cffi==1.14.6 + # via + # bcrypt + # cryptography + # pynacl +charset-normalizer==2.0.4 + # via requests +click==8.0.1 + # via pip-tools +cliff==3.8.0 + # via + # osc-lib + # python-openstackclient +cmd2==2.1.2 + # via cliff +colorama==0.4.4 + # via cmd2 +configobj==5.0.6 + # via teuthology (pyproject.toml) +configparser==5.0.2 + # via teuthology (pyproject.toml) +cryptography==3.4.7 + # via + # ansible-base + # openstacksdk + # paramiko + # pyopenssl + # teuthology (pyproject.toml) +debtcollector==2.2.0 + # via + # oslo.config + # oslo.utils + # python-keystoneclient +decorator==5.0.9 + # via + # dogpile.cache + # openstacksdk +distlib==0.3.4 + # via virtualenv +docopt==0.6.2 + # via teuthology (pyproject.toml) +dogpile.cache==1.1.5 + # via openstacksdk +filelock==3.4.1 + # via + # tox + # virtualenv +gevent==21.8.0 + # via teuthology (pyproject.toml) +greenlet==1.1.0 + # via gevent +httplib2==0.19.1 + # via teuthology (pyproject.toml) +humanfriendly==9.2 + # via teuthology (pyproject.toml) +idna==3.2 + # via requests +importlib-metadata==4.8.3 + # via + # click + # cmd2 + # openstacksdk + # oslo.config + # pep517 + # pluggy + # prettytable + # pytest + # stevedore + # tox + # virtualenv +importlib-resources==5.4.0 + # via + # netaddr + # virtualenv +iniconfig==1.1.1 + # via pytest 
+ipy==1.1 + # via teuthology (pyproject.toml) +iso8601==0.1.16 + # via + # keystoneauth1 + # openstacksdk + # oslo.utils + # python-novaclient + # python-openstackclient +jinja2==3.0.1 + # via ansible-base +jmespath==0.10.0 + # via + # boto3 + # botocore + # openstacksdk +jsonpatch==1.32 + # via openstacksdk +jsonpointer==2.1 + # via jsonpatch +keystoneauth1==4.3.1 + # via + # openstacksdk + # osc-lib + # python-cinderclient + # python-keystoneclient + # python-novaclient +markupsafe==2.0.1 + # via jinja2 +mock==4.0.3 + # via teuthology (pyproject.toml) +msgpack==1.0.2 + # via oslo.serialization +munch==2.5.0 + # via openstacksdk +ndg-httpsclient==0.5.1 + # via teuthology (pyproject.toml) +netaddr==0.8.0 + # via + # oslo.config + # oslo.utils + # teuthology (pyproject.toml) +netifaces==0.11.0 + # via + # openstacksdk + # oslo.utils +nose==1.3.7 + # via teuthology (pyproject.toml) +openstacksdk==0.58.0 + # via + # osc-lib + # python-openstackclient +os-service-types==1.7.0 + # via + # keystoneauth1 + # openstacksdk +osc-lib==2.4.1 + # via python-openstackclient +oslo.config==8.8.0 + # via python-keystoneclient +oslo.i18n==5.1.0 + # via + # osc-lib + # oslo.config + # oslo.utils + # python-cinderclient + # python-keystoneclient + # python-novaclient + # python-openstackclient +oslo.serialization==4.3.0 + # via + # python-keystoneclient + # python-novaclient +oslo.utils==4.12.2 + # via + # osc-lib + # oslo.serialization + # python-cinderclient + # python-keystoneclient + # python-novaclient + # python-openstackclient +packaging==21.0 + # via + # ansible-base + # oslo.utils + # pytest + # tox +paramiko==2.10.1 + # via teuthology (pyproject.toml) +pbr==5.6.0 + # via + # cliff + # debtcollector + # keystoneauth1 + # openstacksdk + # os-service-types + # osc-lib + # oslo.i18n + # oslo.serialization + # oslo.utils + # python-cinderclient + # python-keystoneclient + # python-novaclient + # python-openstackclient + # stevedore +pep517==0.11.0 + # via pip-tools +pexpect==4.8.0 + # via teuthology (pyproject.toml) +pip-tools==6.4.0 + # via teuthology (pyproject.toml) +platformdirs==2.4.0 + # via virtualenv +pluggy==1.0.0 + # via + # pytest + # tox +prettytable==2.1.0 + # via + # cliff + # python-cinderclient + # python-novaclient + # teuthology (pyproject.toml) +psutil==5.8.0 + # via teuthology (pyproject.toml) +ptyprocess==0.7.0 + # via pexpect +py==1.11.0 + # via + # pytest + # tox +pyasn1==0.4.8 + # via + # ndg-httpsclient + # teuthology (pyproject.toml) +pycparser==2.20 + # via cffi +pyjwt==2.3.0 + # via teuthology (pyproject.toml) +pynacl==1.5.0 + # via + # paramiko + # teuthology (pyproject.toml) +pyopenssl==20.0.1 + # via + # ndg-httpsclient + # teuthology (pyproject.toml) +pyparsing==2.4.7 + # via + # cliff + # httplib2 + # oslo.utils + # packaging +pyperclip==1.8.2 + # via cmd2 +pytest==7.0.1 + # via teuthology (pyproject.toml) +python-cinderclient==8.0.0 + # via python-openstackclient +python-dateutil==2.8.2 + # via + # botocore + # teuthology (pyproject.toml) +python-keystoneclient==4.2.0 + # via python-openstackclient +python-novaclient==17.5.0 + # via + # python-openstackclient + # teuthology (pyproject.toml) +python-openstackclient==5.5.0 + # via teuthology (pyproject.toml) +pytz==2021.1 + # via + # oslo.serialization + # oslo.utils +pyyaml==5.4.1 + # via + # ansible-base + # cliff + # openstacksdk + # oslo.config + # teuthology (pyproject.toml) +requests==2.27.1 + # via + # apache-libcloud + # keystoneauth1 + # oslo.config + # python-cinderclient + # python-keystoneclient + # teuthology 
(pyproject.toml) +requestsexceptions==1.4.0 + # via openstacksdk +rfc3986==1.5.0 + # via oslo.config +s3transfer==0.5.2 + # via boto3 +sentry-sdk==1.6.0 + # via teuthology (pyproject.toml) +simplejson==3.17.3 + # via + # osc-lib + # python-cinderclient +six==1.16.0 + # via + # bcrypt + # configobj + # debtcollector + # keystoneauth1 + # munch + # paramiko + # pyopenssl + # python-dateutil + # python-keystoneclient + # tox + # virtualenv +stevedore==3.3.0 + # via + # cliff + # dogpile.cache + # keystoneauth1 + # osc-lib + # oslo.config + # python-cinderclient + # python-keystoneclient + # python-novaclient + # python-openstackclient +toml==0.10.2 + # via + # teuthology (pyproject.toml) + # tox +tomli==1.2.1 + # via + # pep517 + # pytest +tox==3.25.0 + # via teuthology (pyproject.toml) +typing-extensions==4.1.1 + # via + # cmd2 + # importlib-metadata +urllib3==1.26.6 + # via + # botocore + # requests + # sentry-sdk +virtualenv==20.14.1 + # via tox +wcwidth==0.2.5 + # via + # cmd2 + # prettytable +wheel==0.36.2 + # via pip-tools +wrapt==1.12.1 + # via debtcollector +xmltodict==0.12.0 + # via teuthology (pyproject.toml) +zipp==3.6.0 + # via + # importlib-metadata + # importlib-resources + # pep517 +zope.event==4.5.0 + # via gevent +zope.interface==5.4.0 + # via gevent + +# The following packages are considered to be unsafe in a requirements file: +# pip +# setuptools diff --git a/roles/3-simple.yaml b/roles/3-simple.yaml new file mode 100644 index 0000000000..ac2b3917a9 --- /dev/null +++ b/roles/3-simple.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mds.a, osd.0] +- [mon.b, mds.a-s, osd.1] +- [mon.c, client.0] diff --git a/roles/overrides.yaml b/roles/overrides.yaml new file mode 100644 index 0000000000..e93a2b2396 --- /dev/null +++ b/roles/overrides.yaml @@ -0,0 +1,10 @@ +nuke-on-error: true +kernel: + branch: main +overrides: + ceph: + branch: BRANCH_NAME + log-ignorelist: + - 'clocks not synchronized' +tasks: +- chef: diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/describe.py b/scripts/describe.py new file mode 100644 index 0000000000..0764ecf6b3 --- /dev/null +++ b/scripts/describe.py @@ -0,0 +1,79 @@ +import docopt + +import teuthology.config +import teuthology.describe_tests + +doc = """ +usage: + teuthology-describe-tests -h + teuthology-describe-tests [options] [--] + +Describe the contents of a qa suite by reading 'meta' elements from +yaml files in the suite. + +The 'meta' element should contain a list with a dictionary +of key/value pairs for entries, i.e.: + +meta: +- field1: value1 + field2: value2 + field3: value3 + desc: short human-friendly description + +Fields are user-defined, and are not required to be in all yaml files. 
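+
+For example (the suite path and field names here are only illustrative), the
+following lists the 'desc' and 'field1' values for the yaml fragments in a
+suite directory:
+
+    teuthology-describe-tests --fields desc,field1 /path/to/ceph/qa/suites/smoke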
+ +positional arguments: + path of qa suite + +optional arguments: + -h, --help Show this help message and exit + -f , --fields Comma-separated list of fields to + include [default: desc] + --show-facet [yes|no] List the facet of each file + [default: yes] + --format [plain|json|csv] Output format (written to stdout) + [default: plain] + +options only for describing combinations represented by a suite: + -c, --combinations Describe test combinations rather than + individual yaml fragments + -s, --summary Print summary + --filter Only list tests whose description contains + at least one of the keywords in the comma + separated keyword string specified + --filter-out Do not list tests whose description contains + any of the keywords in the comma separated + keyword string specified + --filter-all Only list tests whose description contains + each of the keywords in the comma separated + keyword string specified + -F, --filter-fragments Check fragments additionaly to descriptions + using keywords specified with 'filter', + 'filter-out' and 'filter-all' options. + -p, --print-description Print job descriptions for the suite, + used only in combination with 'summary' + -P, --print-fragments Print file list inovolved for each facet, + used only in combination with 'summary' + -l , --limit List at most this many jobs + [default: 0] + --subset Instead of listing the entire + suite, break the set of jobs into + pieces (each of which + will contain each facet at least + once) and list piece . + Listing 0/, 1/, + 2/ ... -1/ + will list all jobs in the + suite (many more than once). + -S , --seed Used for pseudo-random tests generation + involving facet whose path ends with '$' + operator, where negative value used for + a random seed + [default: -1] + --no-nested-subset Disable nested subsets +""" + + +def main(): + args = docopt.docopt(doc) + teuthology.describe_tests.main(args) diff --git a/scripts/dispatcher.py b/scripts/dispatcher.py new file mode 100644 index 0000000000..4cb1abdea6 --- /dev/null +++ b/scripts/dispatcher.py @@ -0,0 +1,35 @@ +""" +usage: teuthology-dispatcher --help + teuthology-dispatcher --supervisor [-v] --bin-path BIN_PATH --job-config COFNFIG --archive-dir DIR + teuthology-dispatcher [-v] [--archive-dir DIR] [--exit-on-empty-queue] --log-dir LOG_DIR --tube TUBE + +Start a dispatcher for the specified tube. Grab jobs from a beanstalk +queue and run the teuthology tests they describe as subprocesses. The +subprocess invoked is a teuthology-dispatcher command run in supervisor +mode. + +Supervisor mode: Supervise the job run described by its config. Reimage +target machines and invoke teuthology command. Unlock the target machines +at the end of the run. 
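+
+Example (the tube name and directories are illustrative):
+
+    teuthology-dispatcher -v --tube smithi --log-dir /var/log/teuthology --archive-dir /archive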
+ +standard arguments: + -h, --help show this help message and exit + -v, --verbose be more verbose + -t, --tube TUBE which beanstalk tube to read jobs from + -l, --log-dir LOG_DIR path in which to store logs + -a DIR, --archive-dir DIR path to archive results in + --supervisor run dispatcher in job supervisor mode + --bin-path BIN_PATH teuthology bin path + --job-config CONFIG file descriptor of job's config file + --exit-on-empty-queue if the queue is empty, exit +""" + +import docopt +import sys + +import teuthology.dispatcher + + +def main(): + args = docopt.docopt(__doc__) + sys.exit(teuthology.dispatcher.main(args)) diff --git a/scripts/kill.py b/scripts/kill.py new file mode 100644 index 0000000000..31acc8b1a4 --- /dev/null +++ b/scripts/kill.py @@ -0,0 +1,44 @@ +import docopt + +import teuthology.config +import teuthology.kill + +doc = """ +usage: teuthology-kill -h + teuthology-kill [-a ARCHIVE] [-p] -r RUN + teuthology-kill [-a ARCHIVE] [-p] -m MACHINE_TYPE -r RUN + teuthology-kill [-a ARCHIVE] [-o OWNER] -r RUN -j JOB ... + teuthology-kill [-a ARCHIVE] [-o OWNER] -J JOBSPEC + teuthology-kill [-p] -o OWNER -m MACHINE_TYPE -r RUN + +Kill running teuthology jobs: +1. Removes any queued jobs from the beanstalk queue +2. Kills any running jobs +3. Nukes any machines involved + +NOTE: Must be run on the same machine that is executing the teuthology job +processes. + +optional arguments: + -h, --help show this help message and exit + -a ARCHIVE, --archive ARCHIVE + The base archive directory + [default: {archive_base}] + -p, --preserve-queue Preserve the queue - do not delete queued jobs + -r, --run RUN The name(s) of the run(s) to kill + -j, --job JOB The job_id of the job to kill + -J, --jobspec JOBSPEC + The 'jobspec' of the job to kill. A jobspec consists of + both the name of the run and the job_id, separated by a + '/'. e.g. 'my-test-run/1234' + -o, --owner OWNER The owner of the job(s) + -m, --machine-type MACHINE_TYPE + The type of machine the job(s) are running on. + This is required if killing a job that is still + entirely in the queue. 
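+
+Examples:
+teuthology-kill -r my-test-run
+teuthology-kill -J my-test-run/1234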
+""".format(archive_base=teuthology.config.config.archive_base) + + +def main(): + args = docopt.docopt(doc) + teuthology.kill.main(args) diff --git a/scripts/lock.py b/scripts/lock.py new file mode 100644 index 0000000000..939800a495 --- /dev/null +++ b/scripts/lock.py @@ -0,0 +1,181 @@ +import argparse +import textwrap +import sys + +import teuthology.lock +import teuthology.lock.cli + + +def _positive_int(string): + value = int(string) + if value < 1: + raise argparse.ArgumentTypeError( + '{string} is not positive'.format(string=string)) + return value + + +def main(): + sys.exit(teuthology.lock.cli.main(parse_args(sys.argv[1:]))) + + +def parse_args(argv): + parser = argparse.ArgumentParser( + description='Lock, unlock, or query lock status of machines', + epilog=textwrap.dedent(''' + Examples: + teuthology-lock --summary + teuthology-lock --lock-many 1 --machine-type vps + teuthology-lock --lock -t target.yaml + teuthology-lock --list-targets plana01 + teuthology-lock --list --brief --owner user@host + teuthology-lock --brief + teuthology-lock --update --status down --desc testing plana01 + '''), + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + '-v', '--verbose', + action='store_true', + default=False, + help='be more verbose', + ) + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + '--list', + action='store_true', + default=False, + help='Show lock info for machines owned by you, or only machines ' + + 'specified. Can be restricted by --owner, --status, and --locked.', + ) + group.add_argument( + '--brief', + action='store_true', + default=False, + help='Like --list, but with summary instead of detail', + ) + group.add_argument( + '--list-targets', + action='store_true', + default=False, + help='Show lock info for all machines, or only machines specified, ' + + 'in targets: yaml format. 
Can be restricted by --owner, --status, ' + + 'and --locked.', + ) + group.add_argument( + '--lock', + action='store_true', + default=False, + help='lock particular machines', + ) + group.add_argument( + '--unlock', + action='store_true', + default=False, + help='unlock particular machines', + ) + group.add_argument( + '--lock-many', + dest='num_to_lock', + type=_positive_int, + help='lock this many machines', + ) + group.add_argument( + '--update', + action='store_true', + default=False, + help='update the description or status of some machines', + ) + group.add_argument( + '--summary', + action='store_true', + default=False, + help='summarize locked-machine counts by owner', + ) + parser.add_argument( + '-a', '--all', + action='store_true', + default=False, + help='list all machines, not just those owned by you', + ) + parser.add_argument( + '--owner', + default=None, + help='owner of the lock(s) (must match to unlock a machine)', + ) + parser.add_argument( + '-f', + action='store_true', + default=False, + help="don't exit after the first error, continue locking or " + + "unlocking other machines", + ) + parser.add_argument( + '--desc', + default=None, + help='lock description', + ) + parser.add_argument( + '--desc-pattern', + default=None, + help='lock description', + ) + parser.add_argument( + '-m', '--machine-type', + default=None, + help='Type of machine to lock, valid choices: mira | plana | ' + + 'burnupi | vps | saya | tala', + ) + parser.add_argument( + '--status', + default=None, + choices=['up', 'down'], + help='whether a machine is usable for testing', + ) + parser.add_argument( + '--locked', + default=None, + choices=['true', 'false'], + help='whether a machine is locked', + ) + parser.add_argument( + '-t', '--targets', + dest='targets', + default=None, + help='input yaml containing targets', + ) + parser.add_argument( + 'machines', + metavar='MACHINE', + default=[], + nargs='*', + help='machines to operate on', + ) + parser.add_argument( + '--os-type', + default=None, + help='OS type (distro)', + ) + parser.add_argument( + '--os-version', + default=None, + help='OS (distro) version such as "12.10"', + ) + parser.add_argument( + '--arch', + default=None, + help='architecture (x86_64, i386, armv7, aarch64)', + ) + parser.add_argument( + '--json-query', + default=None, + help=textwrap.dedent('''\ + JSON fragment, explicitly given, or a file containing + JSON, containing a query for --list or --brief. + Example: teuthology-lock --list --all --json-query + '{"vm_host":{"name":"mira003.front.sepia.ceph.com"}}' + will list all machines who have a vm_host entry + with a dictionary that contains at least the name key + with value mira003.front.sepia.ceph.com. 
+ Note: be careful about quoting and the shell.'''), + ) + + return parser.parse_args(argv) diff --git a/scripts/ls.py b/scripts/ls.py new file mode 100644 index 0000000000..5c9b33be36 --- /dev/null +++ b/scripts/ls.py @@ -0,0 +1,19 @@ +""" +usage: teuthology-ls [-h] [-v] + +List teuthology job results + +positional arguments: + path under which to archive results + +optional arguments: + -h, --help show this help message and exit + -v, --verbose show reasons tests failed +""" +import docopt +import teuthology.ls + + +def main(): + args = docopt.docopt(__doc__) + teuthology.ls.main(args) diff --git a/scripts/nuke.py b/scripts/nuke.py new file mode 100644 index 0000000000..0b1644c3e7 --- /dev/null +++ b/scripts/nuke.py @@ -0,0 +1,47 @@ +import docopt + +import teuthology.nuke + +doc = """ +usage: + teuthology-nuke --help + teuthology-nuke [-v] [--owner OWNER] [-n NAME] [-u] [-i] [-r|-R] [-s] [-k] + [-p PID] [--dry-run] (-t CONFIG... | -a DIR) + teuthology-nuke [-v] [-u] [-i] [-r] [-s] [--dry-run] --owner OWNER --stale + teuthology-nuke [-v] [--dry-run] --stale-openstack + +Reset test machines + +optional arguments: + -h, --help show this help message and exit + -v, --verbose be more verbose + -t CONFIG [CONFIG ...], --targets CONFIG [CONFIG ...] + yaml config containing machines to nuke + -a DIR, --archive DIR + archive path for a job to kill and nuke + --stale attempt to find and nuke 'stale' machines + (e.g. locked by jobs that are no longer running) + --stale-openstack nuke 'stale' OpenStack instances and volumes + and unlock OpenStack targets with no instance + --dry-run Don't actually nuke anything; just print the list of + targets that would be nuked + --owner OWNER job owner + -p PID, --pid PID pid of the process to be killed + -r, --reboot-all reboot all machines (default) + -R, --no-reboot do not reboot the machines + -s, --synch-clocks synchronize clocks on all machines + -u, --unlock Unlock each successfully nuked machine, and output + targets thatcould not be nuked. + -n NAME, --name NAME Name of run to cleanup + -i, --noipmi Skip ipmi checking + -k, --keep-logs Preserve test directories and logs on the machines + +Examples: +teuthology-nuke -t target.yaml --unlock --owner user@host +teuthology-nuke -t target.yaml --pid 1234 --unlock --owner user@host +""" + + +def main(): + args = docopt.docopt(doc) + teuthology.nuke.main(args) diff --git a/scripts/openstack.py b/scripts/openstack.py new file mode 100644 index 0000000000..a9f09332ef --- /dev/null +++ b/scripts/openstack.py @@ -0,0 +1,409 @@ +import argparse +import sys +import os + +import teuthology.openstack + +def main(argv=sys.argv[1:]): + sys.exit(teuthology.openstack.main(parse_args(argv), argv)) + +def get_key_parser(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--key-name', + help='OpenStack keypair name', + ) + parser.add_argument( + '--key-filename', + help='path to the ssh private key. 
Default: %(default)s', + default=[ + os.environ['HOME'] + '/.ssh/id_rsa', + os.environ['HOME'] + '/.ssh/id_dsa', + os.environ['HOME'] + '/.ssh/id_ecdsa' + ] + ) + return parser + +def get_suite_parser(): + parser = argparse.ArgumentParser() + # copy/pasted from scripts/suite.py + parser.add_argument( + 'config_yaml', + nargs='*', + help='Optional extra job yaml to include', + ) + parser.add_argument( + '-v', '--verbose', + action='store_true', default=None, + help='be more verbose', + ) + parser.add_argument( + '--dry-run', + action='store_true', default=None, + help='Do a dry run; do not schedule anything', + ) + parser.add_argument( + '-s', '--suite', + help='The suite to schedule', + ) + parser.add_argument( + '-c', '--ceph', + help='The ceph branch to run against', + default=os.getenv('TEUTH_CEPH_BRANCH', 'main'), + ) + parser.add_argument( + '-k', '--kernel', + help=('The kernel branch to run against; if not ' + 'supplied, the installed kernel is unchanged'), + ) + parser.add_argument( + '-f', '--flavor', + help=("The ceph packages shaman flavor to run with:" + "('default', 'crimson', 'notcmalloc', 'jaeger')"), + default='default', + ) + parser.add_argument( + '-d', '--distro', + help='Distribution to run against', + ) + parser.add_argument( + '--suite-branch', + help='Use this suite branch instead of the ceph branch', + default=os.getenv('TEUTH_SUITE_BRANCH', 'main'), + ) + parser.add_argument( + '-e', '--email', + help='When tests finish or time out, send an email here', + ) + parser.add_argument( + '-N', '--num', + help='Number of times to run/queue the job', + type=int, + default=1, + ) + parser.add_argument( + '-l', '--limit', + metavar='JOBS', + help='Queue at most this many jobs', + type=int, + ) + parser.add_argument( + '--subset', + help=('Instead of scheduling the entire suite, break the ' + 'set of jobs into pieces (each of which will ' + 'contain each facet at least once) and schedule ' + 'piece . Scheduling 0/, 1/, ' + '2/ ... -1/ will schedule all ' + 'jobs in the suite (many more than once).') + ) + parser.add_argument( + '-p', '--priority', + help='Job priority (lower is sooner)', + type=int, + default=1000, + ) + parser.add_argument( + '--timeout', + help=('How long, in seconds, to wait for jobs to finish ' + 'before sending email. This does not kill jobs.'), + type=int, + default=43200, + ) + parser.add_argument( + '--filter', + help=('Only run jobs whose description contains at least one ' + 'of the keywords in the comma separated keyword ' + 'string specified. ') + ) + parser.add_argument( + '--filter-out', + help=('Do not run jobs whose description contains any of ' + 'the keywords in the comma separated keyword ' + 'string specified. ') + ) + parser.add_argument( + '--throttle', + help=('When scheduling, wait SLEEP seconds between jobs. ' + 'Useful to avoid bursts that may be too hard on ' + 'the underlying infrastructure or exceed OpenStack API ' + 'limits (server creation per minute for instance).'), + type=int, + default=15, + ) + parser.add_argument( + '--suite-relpath', + help=('Look for tasks and suite definitions in this' + 'subdirectory of the suite repo.'), + ) + parser.add_argument( + '-r', '--rerun', + help=('Attempt to reschedule a run, selecting only those' + 'jobs whose status are mentioned by' + '--rerun-status.' + 'Note that this is implemented by scheduling an' + 'entirely new suite and including only jobs whose' + 'descriptions match the selected ones. It does so' + 'using the same logic as --filter.' 
+ 'Of all the flags that were passed when scheduling' + 'the original run, the resulting one will only' + 'inherit the suite value. Any others must be' + 'passed as normal while scheduling with this' + 'feature.'), + ) + parser.add_argument( + '-R', '--rerun-statuses', + help=("A comma-separated list of statuses to be used" + "with --rerun. Supported statuses are: 'dead'," + "'fail', 'pass', 'queued', 'running', 'waiting'"), + default='fail,dead', + ) + parser.add_argument( + '-D', '--distroversion', '--distro-version', + help='Distro version to run against', + ) + parser.add_argument( + '-n', '--newest', + help=('Search for the newest revision built on all' + 'required distro/versions, starting from' + 'either --ceph or --sha1, backtracking' + 'up to commits'), + type=int, + default=0, + ) + parser.add_argument( + '-S', '--sha1', + help=('The ceph sha1 to run against (overrides -c)' + 'If both -S and -c are supplied, -S wins, and' + 'there is no validation that sha1 is contained' + 'in branch') + ) + parser.add_argument( + '--ceph-repo', + help=("Query this repository for Ceph branch and SHA1"), + default=os.getenv('TEUTH_CEPH_REPO', 'https://github.com/ceph/ceph'), + ) + parser.add_argument( + '--suite-repo', + help=("Use tasks and suite definition in this repository"), + default=os.getenv('TEUTH_SUITE_REPO', 'https://github.com/ceph/ceph'), + ) + parser.add_argument( + '--sleep-before-teardown', + help='Number of seconds to sleep before the teardown', + default=0 + ) + return parser + +def get_openstack_parser(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--wait', + action='store_true', default=None, + help='block until the suite is finished', + ) + parser.add_argument( + '--name', + help='OpenStack primary instance name', + default='teuthology', + ) + parser.add_argument( + '--nameserver', + help='nameserver ip address (optional)', + ) + parser.add_argument( + '--simultaneous-jobs', + help='maximum number of jobs running in parallel', + type=int, + default=1, + ) + parser.add_argument( + '--controller-cpus', + help='override default minimum vCPUs when selecting flavor for teuthology VM', + type=int, + default=0, + ) + parser.add_argument( + '--controller-ram', + help='override default minimum RAM (in megabytes) when selecting flavor for teuthology VM', + type=int, + default=0, + ) + parser.add_argument( + '--controller-disk', + help='override default minimum disk size (in gigabytes) when selecting flavor for teuthology VM', + type=int, + default=0, + ) + parser.add_argument( + '--setup', + action='store_true', default=False, + help='deploy the cluster, if it does not exist', + ) + parser.add_argument( + '--teardown', + action='store_true', default=None, + help='destroy the cluster, if it exists', + ) + parser.add_argument( + '--teuthology-git-url', + help="git clone url for teuthology", + default=os.getenv('TEUTH_REPO', 'https://github.com/ceph/teuthology'), + ) + parser.add_argument( + '--teuthology-branch', + help="use this teuthology branch instead of main", + default=os.getenv('TEUTH_BRANCH', 'main'), + ) + parser.add_argument( + '--ceph-workbench-git-url', + help="git clone url for ceph-workbench", + ) + parser.add_argument( + '--ceph-workbench-branch', + help="use this ceph-workbench branch instead of main", + default='main', + ) + parser.add_argument( + '--upload', + action='store_true', default=False, + help='upload archives to an rsync server', + ) + parser.add_argument( + '--archive-upload', + help='rsync destination to upload archives', + 
default='ubuntu@teuthology-logs.public.ceph.com:./', + ) + parser.add_argument( + '--archive-upload-url', + help='Public facing URL where archives are uploaded', + default='http://teuthology-logs.public.ceph.com', + ) + parser.add_argument( + '--test-repo', + action='append', + help=('Package repository to be added on test nodes, which are specified ' + 'as NAME:URL, NAME!PRIORITY:URL or @FILENAME, for details see below.'), + default=None, + ) + parser.add_argument( + '--no-canonical-tags', + action='store_true', default=False, + help='configure remote teuthology to not fetch tags from http://github.com/ceph/ceph.git in buildpackages task', + ) + return parser + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + parents=[ + get_suite_parser(), + get_key_parser(), + get_openstack_parser(), + ], + conflict_handler='resolve', + add_help=False, + epilog="""test repos: + +Test repository can be specified using --test-repo optional argument +with value in the following formats: NAME:URL, NAME!PRIORITY:URL +or @FILENAME. See examples: + +1) Essential usage requires to provide repo name and url: + + --test-repo foo:http://example.com/repo/foo + +2) Repo can be prioritized by adding a number after '!' symbol + in the name: + + --test-repo 'bar!10:http://example.com/repo/bar' + +3) Repo data can be taken from a file by simply adding '@' symbol + at the beginning argument value, for example from yaml: + + --test-repo @path/to/foo.yaml + + where `foo.yaml` contains one or more records like: + + - name: foo + priority: 1 + url: http://example.com/repo/foo + +4) Or from json file: + + --test-repo @path/to/foo.json + + where `foo.json` content is: + + [{"name":"foo","priority":1,"url":"http://example.com/repo/foo"}] + + +Several repos can be provided with multiple usage of --test-repo and/or +you can provide several repos within one yaml or json file. +The repositories are added in the order they appear in the command line or +in the file. Example: + + --- + # The foo0 repo will be included first, after all that have any priority, + # in particular after foo1 because it has lowest priority + - name: foo0 + url: http://example.com/repo/foo0 + # The foo1 will go after foo2 because it has lower priority then foo2 + - name: foo1 + url: http://example.com/repo/foo1 + priority: 2 + # The foo2 will go first because it has highest priority + - name: foo2 + url: http://example.com/repo/foo2 + priority: 1 + # The foo3 will go after foo0 because it appears after it in this file + - name: foo3 + url: http://example.com/repo/foo3 + +Equivalent json file content below: + + [ + { + "name": "foo0", + "url": "http://example.com/repo/foo0" + }, + { + "name": "foo1", + "url": "http://example.com/repo/foo1", + "priority": 2 + }, + { + "name": "foo2", + "url": "http://example.com/repo/foo2", + "priority": 1 + }, + { + "name": "foo3", + "url": "http://example.com/repo/foo3" + } + ] + +At the moment supported only files with extensions: .yaml, .yml, .json, .jsn. + +teuthology-openstack %s +""" % teuthology.__version__, + description=""" +Run a suite of ceph integration tests. A suite is a directory containing +facets. A facet is a directory containing config snippets. Running a suite +means running teuthology for every configuration combination generated by +taking one config snippet from each facet. Any config files passed on the +command line will be used for every combination, and will override anything in +the suite. 
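For readers unfamiliar with how get_parser() above combines the suite, key and openstack option groups: argparse merges each parent's arguments into one parser, and conflict_handler='resolve' lets a later parent redefine an option that two groups both declare. A minimal, self-contained sketch of that composition (the option names below are invented, not teuthology's):

import argparse

def suite_opts():
    p = argparse.ArgumentParser(add_help=False)
    p.add_argument('--ceph', default='main')
    p.add_argument('--dry-run', action='store_true')
    return p

def cloud_opts():
    p = argparse.ArgumentParser(add_help=False)
    p.add_argument('--name', default='teuthology')
    return p

parser = argparse.ArgumentParser(
    parents=[suite_opts(), cloud_opts()],
    conflict_handler='resolve',  # allow a later parent to redefine an option
    add_help=False,
)
print(parser.parse_args(['--ceph', 'quincy']))
# -> Namespace(ceph='quincy', dry_run=False, name='teuthology')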
By specifying a subdirectory in the suite argument, it is possible +to limit the run to a specific facet. For instance -s upgrade/dumpling-x only +runs the dumpling-x facet of the upgrade suite. + +Display the http and ssh access to follow the progress of the suite +and analyze results. + + firefox http://183.84.234.3:8081/ + ssh -i teuthology-admin.pem ubuntu@183.84.234.3 + +""") + return parser + +def parse_args(argv): + return get_parser().parse_args(argv) diff --git a/scripts/prune_logs.py b/scripts/prune_logs.py new file mode 100644 index 0000000000..424b4b7b7d --- /dev/null +++ b/scripts/prune_logs.py @@ -0,0 +1,38 @@ +import docopt + +import teuthology.config +import teuthology.prune + +doc = """ +usage: + teuthology-prune-logs -h + teuthology-prune-logs [-v] [options] + +Prune old logfiles from the archive + +optional arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + -a ARCHIVE, --archive ARCHIVE + The base archive directory + [default: {archive_base}] + --dry-run Don't actually delete anything; just log what would be + deleted + -p DAYS, --pass DAYS Remove all logs for jobs which passed and are older + than DAYS. Negative values will skip this operation. + [default: 14] + -f DAYS, --fail DAYS Like --pass, but for failed jobs. [default: -1] + -r DAYS, --remotes DAYS + Remove the 'remote' subdir of jobs older than DAYS. + Negative values will skip this operation. + [default: 60] + -z DAYS, --compress DAYS + Compress (using gzip) any teuthology.log files older + than DAYS. Negative values will skip this operation. + [default: 30] +""".format(archive_base=teuthology.config.config.archive_base) + + +def main(): + args = docopt.docopt(doc) + teuthology.prune.main(args) diff --git a/scripts/queue.py b/scripts/queue.py new file mode 100644 index 0000000000..8ea5ca5c2c --- /dev/null +++ b/scripts/queue.py @@ -0,0 +1,36 @@ +import docopt + +import teuthology.config +import teuthology.beanstalk + +doc = """ +usage: teuthology-queue -h + teuthology-queue [-s|-d|-f] -m MACHINE_TYPE + teuthology-queue [-r] -m MACHINE_TYPE + teuthology-queue -m MACHINE_TYPE -D PATTERN + teuthology-queue -p SECONDS [-m MACHINE_TYPE] + +List Jobs in queue. +If -D is passed, then jobs with PATTERN in the job name are deleted from the +queue. + +Arguments: + -m, --machine_type MACHINE_TYPE [default: multi] + Which machine type queue to work on. + +optional arguments: + -h, --help Show this help message and exit + -D, --delete PATTERN Delete Jobs with PATTERN in their name + -d, --description Show job descriptions + -r, --runs Only show run names + -f, --full Print the entire job config. Use with caution. + -s, --status Prints the status of the queue + -p, --pause SECONDS Pause queues for a number of seconds. A value of 0 + will unpause. If -m is passed, pause that queue, + otherwise pause all queues. +""" + + +def main(): + args = docopt.docopt(doc) + teuthology.beanstalk.main(args) diff --git a/scripts/reimage.py b/scripts/reimage.py new file mode 100644 index 0000000000..42ec6e8ffe --- /dev/null +++ b/scripts/reimage.py @@ -0,0 +1,25 @@ +import docopt +import sys + +import teuthology.reimage + +doc = """ +usage: teuthology-reimage --help + teuthology-reimage --os-type distro --os-version version [options] ... + +Reimage nodes without locking using specified distro type and version. +The nodes must be locked by the current user, otherwise an error occurs. +Custom owner can be specified in order to provision someone else nodes. +Reimaging unlocked nodes cannot be provided. 
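Most of these command wrappers follow the same docopt pattern: the module docstring is the argument specification, and docopt() returns a plain dict keyed by option name. A standalone sketch of that pattern, with a made-up tool name and options; note that defaults can be interpolated into the doc before parsing, as teuthology-prune-logs does with {archive_base}:

import docopt

doc = """
Usage: example-tool [-v] [--archive DIR] [PATTERN]

Options:
  -v, --verbose  Be more verbose.
  --archive DIR  Base directory to work in [default: /tmp/archive].
"""

def main(argv=None):
    args = docopt.docopt(doc, argv=argv)
    # e.g. {'--verbose': True, '--archive': '/tmp/archive', 'PATTERN': None}
    return args

if __name__ == '__main__':
    print(main(['-v']))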
+ +Standard arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + --os-type Distro type like: rhel, ubuntu, etc. + --os-version Distro version like: 7.6, 16.04, etc. + --owner user@host Owner of the locked machines +""" + +def main(argv=sys.argv[1:]): + args = docopt.docopt(doc, argv=argv) + return teuthology.reimage.main(args) diff --git a/scripts/report.py b/scripts/report.py new file mode 100644 index 0000000000..d2b39d3c5a --- /dev/null +++ b/scripts/report.py @@ -0,0 +1,42 @@ +import docopt + +import teuthology.report + +doc = """ +usage: + teuthology-report -h + teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] [-D] -r RUN ... + teuthology-report [-v] [-s SERVER] [-a ARCHIVE] [-D] -r RUN -j JOB ... + teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] --all-runs + +Submit test results to a web service + +optional arguments: + -h, --help show this help message and exit + -a ARCHIVE, --archive ARCHIVE + The base archive directory + [default: {archive_base}] + -r [RUN ...], --run [RUN ...] + A run (or list of runs) to submit + -j [JOB ...], --job [JOB ...] + A job (or list of jobs) to submit + --all-runs Submit all runs in the archive + -R, --refresh Re-push any runs already stored on the server. Note + that this may be slow. + -s SERVER, --server SERVER + "The server to post results to, e.g. + http://localhost:8080/ . May also be specified in + ~/.teuthology.yaml as 'results_server' + -n, --no-save By default, when submitting all runs, we remember the + last successful submission in a file called + 'last_successful_run'. Pass this flag to disable that + behavior. + -D, --dead Mark all given jobs (or entire runs) with status + 'dead'. Implies --refresh. + -v, --verbose be more verbose +""".format(archive_base=teuthology.config.config.archive_base) + + +def main(): + args = docopt.docopt(doc) + teuthology.report.main(args) diff --git a/scripts/results.py b/scripts/results.py new file mode 100644 index 0000000000..99e70a3fd0 --- /dev/null +++ b/scripts/results.py @@ -0,0 +1,25 @@ +""" +usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] --archive-dir DIR --name NAME [--subset SUBSET] [--seed SEED] [--no-nested-subset] + +Email teuthology suite results + +optional arguments: + -h, --help show this help message and exit + -v, --verbose be more verbose + --dry-run Instead of sending the email, just print it + --email EMAIL address to email test failures to + --timeout TIMEOUT how many seconds to wait for all tests to finish + [default: 0] + --archive-dir DIR path under which results for the suite are stored + --name NAME name of the suite + --subset SUBSET subset passed to teuthology-suite + --seed SEED random seed used in teuthology-suite + --no-nested-subset disable nested subsets used in teuthology-suite +""" +import docopt +import teuthology.results + + +def main(): + args = docopt.docopt(__doc__) + teuthology.results.main(args) diff --git a/scripts/run.py b/scripts/run.py new file mode 100644 index 0000000000..20ee6ef3bc --- /dev/null +++ b/scripts/run.py @@ -0,0 +1,38 @@ +""" +usage: teuthology --help + teuthology --version + teuthology [options] [--] ... 
+ +Run ceph integration tests + +positional arguments: + one or more config files to read + +optional arguments: + -h, --help show this help message and exit + -v, --verbose be more verbose + --version the current installed version of teuthology + -a DIR, --archive DIR path to archive results in + --description DESCRIPTION job description + --owner OWNER job owner + --lock lock machines for the duration of the run + --machine-type MACHINE_TYPE Type of machine to lock/run tests on. + --os-type OS_TYPE Distro/OS of machine to run test on. + --os-version OS_VERSION Distro/OS version of machine to run test on. + --block block until locking machines succeeds (use with --lock) + --name NAME name for this teuthology run + --suite-path SUITE_PATH Location of ceph-qa-suite on disk. If not specified, + it will be fetched + --interactive-on-error drop to a python shell on failure, which will + halt the job; developer can then ssh to targets + and examine cluster state. + +""" +import docopt + +import teuthology.run + + +def main(): + args = docopt.docopt(__doc__, version=teuthology.__version__) + teuthology.run.main(args) diff --git a/scripts/schedule.py b/scripts/schedule.py new file mode 100644 index 0000000000..58f7a46249 --- /dev/null +++ b/scripts/schedule.py @@ -0,0 +1,61 @@ +import docopt + +import teuthology.misc +import teuthology.schedule +import sys + +doc = """ +usage: teuthology-schedule -h + teuthology-schedule [options] --name [--] [ ...] + +Schedule ceph integration tests + +positional arguments: + Config file to read + "-" indicates read stdin. + +optional arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + -b , --queue-backend + Queue backend name, use prefix '@' + to append job config to the given + file path as yaml. + [default: beanstalk] + -n , --name Name of suite run the job is part of + -d , --description Job description + -o , --owner Job owner + -w , --worker Which worker to use (type of machine) + [default: plana] + -p , --priority Job priority (lower is sooner) + [default: 1000] + -N , --num Number of times to run/queue the job + [default: 1] + + --first-in-suite Mark the first job in a suite so suite + can note down the rerun-related info + [default: False] + --last-in-suite Mark the last job in a suite so suite + post-processing can be run + [default: False] + --email Where to send the results of a suite. + Only applies to the last job in a suite. + --timeout How many seconds to wait for jobs to + finish before emailing results. Only + applies to the last job in a suite. + --seed The random seed for rerunning the suite. + Only applies to the last job in a suite. + --subset The subset option passed to teuthology-suite. + Only applies to the last job in a suite. + --no-nested-subset The no-nested-subset option passed to + teuthology-suite. + Only applies to the last job in a suite. + --dry-run Instead of scheduling, just output the + job config. + +""" + + +def main(argv=sys.argv[1:]): + args = docopt.docopt(doc, argv=argv) + teuthology.schedule.main(args) diff --git a/scripts/suite.py b/scripts/suite.py new file mode 100644 index 0000000000..5cafee024c --- /dev/null +++ b/scripts/suite.py @@ -0,0 +1,219 @@ +import docopt +import sys + +import teuthology.suite +from teuthology.suite import override_arg_defaults as defaults +from teuthology.config import config + +doc = """ +usage: teuthology-suite --help + teuthology-suite [-v | -vv ] --suite [options] [...] + teuthology-suite [-v | -vv ] --rerun [options] [...] 
+ +Run a suite of ceph integration tests. A suite is a directory containing +facets. A facet is a directory containing config snippets. Running a suite +means running teuthology for every configuration combination generated by +taking one config snippet from each facet. Any config files passed on the +command line will be used for every combination, and will override anything in +the suite. By specifying a subdirectory in the suite argument, it is possible +to limit the run to a specific facet. For instance -s upgrade/dumpling-x only +runs the dumpling-x facet of the upgrade suite. + +Miscellaneous arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + --dry-run Do a dry run; do not schedule anything. In + combination with -vv, also call + teuthology-schedule with --dry-run. + -y, --non-interactive Do not ask question and say yes when + it is possible. + +Standard arguments: + Optional extra job yaml to include + -s , --suite + The suite to schedule + --wait Block until the suite is finished + -c , --ceph The ceph branch to run against + [default: {default_ceph_branch}] + -S , --sha1 The ceph sha1 to run against (overrides -c) + If both -S and -c are supplied, -S wins, and + there is no validation that sha1 is contained + in branch + -n , --newest + Search for the newest revision built on all + required distro/versions, starting from + either --ceph or --sha1, backtracking + up to commits [default: 0] + -k , --kernel + The kernel branch to run against, + use 'none' to bypass kernel task. + [default: distro] + -f , --flavor + The ceph packages shaman flavor to run with: + ('default', 'crimson', 'notcmalloc', 'jaeger') + [default: default] + -t , --teuthology-branch + The teuthology branch to run against. + Default value is determined in the next order. + There is TEUTH_BRANCH environment variable set. + There is `qa/.teuthology_branch` present in + the suite repo and contains non-empty string. + There is `teuthology_branch` present in one of + the user or system `teuthology.yaml` configuration + files respectively, otherwise use `main`. + -m , --machine-type + Machine type [default: {default_machine_type}] + -d , --distro + Distribution to run against + -D , --distro-version + Distro version to run against + --ceph-repo Query this repository for Ceph branch and SHA1 + values [default: {default_ceph_repo}] + --suite-repo Use tasks and suite definition in this repository + [default: {default_suite_repo}] + --suite-relpath + Look for tasks and suite definitions in this + subdirectory of the suite repo. + [default: qa] + --suite-branch + Use this suite branch instead of the ceph branch + --suite-dir Use this alternative directory as-is when + assembling jobs from yaml fragments. This causes + to be ignored for scheduling + purposes, but it will still be used for test + running. The must have `qa/suite` + sub-directory. + --validate-sha1 + Validate that git SHA1s passed to -S exist. + [default: true] + --sleep-before-teardown + Number of seconds to sleep before teardown. + Use with care, as this applies to all jobs in the + run. This option is used along with --limit one. + If the --limit ommitted then it's forced to 1. + If the --limit is greater than 4, then user must + confirm it interactively to avoid massive lock + of resources, however --non-interactive option + can be used to skip user input. + [default: 0] + --arch Override architecture defaults, for example, + aarch64, armv7l, x86_64. 
Normally this + argument should not be provided and the arch + is determined from --machine-type. + +Scheduler arguments: + --owner Job owner + -b , --queue-backend + Scheduler queue backend name + -e , --email + When tests finish or time out, send an email + here. May also be specified in ~/.teuthology.yaml + as 'results_email' + --rocketchat Comma separated list of Rocket.Chat channels where + to send a message when tests finished or time out. + To be used with --sleep-before-teardown option. + -N , --num Number of times to run/queue the job + [default: 1] + -l , --limit Queue at most this many jobs + [default: 0] + --subset Instead of scheduling the entire suite, break the + set of jobs into pieces (each of which will + contain each facet at least once) and schedule + piece . Scheduling 0/, 1/, + 2/ ... -1/ will schedule all + jobs in the suite (many more than once). + -p , --priority + Job priority (lower is sooner) + [default: 1000] + --timeout How long, in seconds, to wait for jobs to finish + before sending email. This does not kill jobs. + [default: {default_results_timeout}] + --filter KEYWORDS Only run jobs whose description contains at least one + of the keywords in the comma separated keyword + string specified. + --filter-out KEYWORDS Do not run jobs whose description contains any of + the keywords in the comma separated keyword + string specified. + --filter-all KEYWORDS Only run jobs whose description contains each one + of the keywords in the comma separated keyword + string specified. + -F, --filter-fragments + Check yaml fragments too if job description + does not match the filters provided with + options --filter, --filter-out, and --filter-all. + [default: false] + --archive-upload RSYNC_DEST Rsync destination to upload archives. + --archive-upload-url URL Public facing URL where archives are uploaded. + --throttle SLEEP When scheduling, wait SLEEP seconds between jobs. + Useful to avoid bursts that may be too hard on + the underlying infrastructure or exceed OpenStack API + limits (server creation per minute for instance). + -r, --rerun Attempt to reschedule a run, selecting only those + jobs whose status are mentioned by + --rerun-status. + Note that this is implemented by scheduling an + entirely new suite and including only jobs whose + descriptions match the selected ones. It does so + using the same logic as --filter. + Of all the flags that were passed when scheduling + the original run, the resulting one will only + inherit the suite value. Any others must be + passed as normal while scheduling with this + feature. For random tests involving facet whose + path ends with '$' operator, you might want to + use --seed argument to repeat them. + -R, --rerun-statuses + A comma-separated list of statuses to be used + with --rerun. Supported statuses are: 'dead', + 'fail', 'pass', 'queued', 'running', 'waiting' + [default: fail,dead] + --seed SEED An random number mostly useful when used along + with --rerun argument. This number can be found + in the output of teuthology-suite command. -1 + for a random seed [default: -1]. + --force-priority Skip the priority check. + --job-threshold Do not allow to schedule the run if the number + of jobs exceeds . Use 0 to allow + any number [default: {default_job_threshold}]. + --no-nested-subset Do not perform nested suite subsets. 
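The --subset mechanics described above can be pictured with a toy partition. This is only an illustration of the i/n idea, not the scheduler's real algorithm, which additionally guarantees that every facet appears at least once in each piece:

def subset(jobs, i, n):
    # piece i of n: every n-th job starting at offset i
    return jobs[i::n]

jobs = ['job-%02d' % k for k in range(10)]
pieces = [subset(jobs, i, 3) for i in range(3)]
assert sorted(sum(pieces, [])) == sorted(jobs)  # 0/3, 1/3, 2/3 cover the suite
print(pieces[1])  # ['job-01', 'job-04', 'job-07']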
+ ++=================+=================================================================+ +| Priority | Explanation | ++=================+=================================================================+ +| N < 10 | Use this if the sky is falling and some group of tests | +| | must be run ASAP. | ++-----------------+-----------------------------------------------------------------+ +| 10 <= N < 50 | Use this if your tests are urgent and blocking other | +| | important development. | ++-----------------+-----------------------------------------------------------------+ +| 50 <= N < 75 | Use this if you are testing a particular feature/fix | +| | and running fewer than about 25 jobs. This range is also | +| | used for urgent release testing. | ++-----------------+-----------------------------------------------------------------+ +| 75 <= N < 100 | Tech Leads regularly schedule integration tests with this | +| | priority to verify pull requests against main. | ++-----------------+-----------------------------------------------------------------+ +| 100 <= N < 150 | This priority is used for QE validation of point releases. | ++-----------------+-----------------------------------------------------------------+ +| 150 <= N < 200 | Use this priority for 100 jobs or fewer that test a particular | +| | feature or fix. Results are available in about 24 hours. | ++-----------------+-----------------------------------------------------------------+ +| 200 <= N < 1000 | Use this priority for large test runs. Results are available | +| | in about a week. | ++-----------------+-----------------------------------------------------------------+ + +""".format( + default_machine_type=config.default_machine_type, + default_results_timeout=config.results_timeout, + default_ceph_repo=defaults('--ceph-repo', + config.get_ceph_git_url()), + default_suite_repo=defaults('--suite-repo', + config.get_ceph_qa_suite_git_url()), + default_ceph_branch=defaults('--ceph-branch', 'main'), + default_job_threshold=config.job_threshold, +) + + +def main(argv=sys.argv[1:]): + args = docopt.docopt(doc, argv=argv) + return teuthology.suite.main(args) diff --git a/scripts/test/script.py b/scripts/test/script.py new file mode 100644 index 0000000000..fdabd1b553 --- /dev/null +++ b/scripts/test/script.py @@ -0,0 +1,16 @@ +import subprocess +from pytest import raises + + +class Script(object): + script_name = 'teuthology' + + def test_help(self): + args = (self.script_name, '--help') + out = subprocess.check_output(args).decode() + assert out.startswith('usage') + + def test_invalid(self): + args = (self.script_name, '--invalid-option') + with raises(subprocess.CalledProcessError): + subprocess.check_call(args) diff --git a/scripts/test/test_lock.py b/scripts/test/test_lock.py new file mode 100644 index 0000000000..3fc803aae6 --- /dev/null +++ b/scripts/test/test_lock.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestLock(Script): + script_name = 'teuthology-lock' diff --git a/scripts/test/test_ls.py b/scripts/test/test_ls.py new file mode 100644 index 0000000000..d0e4d81451 --- /dev/null +++ b/scripts/test/test_ls.py @@ -0,0 +1,15 @@ +import docopt + +from script import Script +from scripts import ls + +doc = ls.__doc__ + + +class TestLs(Script): + script_name = 'teuthology-ls' + + def test_args(self): + args = docopt.docopt(doc, ["--verbose", "some/archive/dir"]) + assert args["--verbose"] + assert args[""] == "some/archive/dir" diff --git a/scripts/test/test_nuke.py b/scripts/test/test_nuke.py new file mode 100644 
index 0000000000..fa615c4665 --- /dev/null +++ b/scripts/test/test_nuke.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestNuke(Script): + script_name = 'teuthology-nuke' diff --git a/scripts/test/test_prune_logs.py b/scripts/test/test_prune_logs.py new file mode 100644 index 0000000000..8e967522f0 --- /dev/null +++ b/scripts/test/test_prune_logs.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestPruneLogs(Script): + script_name = 'teuthology-prune-logs' diff --git a/scripts/test/test_report.py b/scripts/test/test_report.py new file mode 100644 index 0000000000..c8065fd1f8 --- /dev/null +++ b/scripts/test/test_report.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestReport(Script): + script_name = 'teuthology-report' diff --git a/scripts/test/test_results.py b/scripts/test/test_results.py new file mode 100644 index 0000000000..a97981cb6b --- /dev/null +++ b/scripts/test/test_results.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestResults(Script): + script_name = 'teuthology-results' diff --git a/scripts/test/test_run.py b/scripts/test/test_run.py new file mode 100644 index 0000000000..74fa1b9263 --- /dev/null +++ b/scripts/test/test_run.py @@ -0,0 +1,45 @@ +import docopt + +from script import Script +from scripts import run + +doc = run.__doc__ + + +class TestRun(Script): + script_name = 'teuthology' + + def test_all_args(self): + args = docopt.docopt(doc, [ + "--verbose", + "--archive", "some/archive/dir", + "--description", "the_description", + "--owner", "the_owner", + "--lock", + "--machine-type", "machine_type", + "--os-type", "os_type", + "--os-version", "os_version", + "--block", + "--name", "the_name", + "--suite-path", "some/suite/dir", + "path/to/config.yml", + ]) + assert args["--verbose"] + assert args["--archive"] == "some/archive/dir" + assert args["--description"] == "the_description" + assert args["--owner"] == "the_owner" + assert args["--lock"] + assert args["--machine-type"] == "machine_type" + assert args["--os-type"] == "os_type" + assert args["--os-version"] == "os_version" + assert args["--block"] + assert args["--name"] == "the_name" + assert args["--suite-path"] == "some/suite/dir" + assert args[""] == ["path/to/config.yml"] + + def test_multiple_configs(self): + args = docopt.docopt(doc, [ + "config1.yml", + "config2.yml", + ]) + assert args[""] == ["config1.yml", "config2.yml"] diff --git a/scripts/test/test_schedule.py b/scripts/test/test_schedule.py new file mode 100644 index 0000000000..e89f983a7a --- /dev/null +++ b/scripts/test/test_schedule.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestSchedule(Script): + script_name = 'teuthology-schedule' diff --git a/scripts/test/test_suite.py b/scripts/test/test_suite.py new file mode 100644 index 0000000000..062aba470d --- /dev/null +++ b/scripts/test/test_suite.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestSuite(Script): + script_name = 'teuthology-suite' diff --git a/scripts/test/test_updatekeys.py b/scripts/test/test_updatekeys.py new file mode 100644 index 0000000000..c4122b0f78 --- /dev/null +++ b/scripts/test/test_updatekeys.py @@ -0,0 +1,21 @@ +from script import Script +import subprocess +from pytest import raises +from pytest import skip + + +class TestUpdatekeys(Script): + script_name = 'teuthology-updatekeys' + + def test_invalid(self): + skip("teuthology.lock needs to be partially refactored to allow" + + "teuthology-updatekeys to return nonzero in all erorr cases") + + def test_all_and_targets(self): + args = (self.script_name, '-a', 
'-t', 'foo') + with raises(subprocess.CalledProcessError): + subprocess.check_call(args) + + def test_no_args(self): + with raises(subprocess.CalledProcessError): + subprocess.check_call(self.script_name) diff --git a/scripts/test/test_worker.py b/scripts/test/test_worker.py new file mode 100644 index 0000000000..8e76c43a5c --- /dev/null +++ b/scripts/test/test_worker.py @@ -0,0 +1,5 @@ +from script import Script + + +class TestWorker(Script): + script_name = 'teuthology-worker' diff --git a/scripts/update_inventory.py b/scripts/update_inventory.py new file mode 100644 index 0000000000..014e3ccf5c --- /dev/null +++ b/scripts/update_inventory.py @@ -0,0 +1,41 @@ +import docopt + +import teuthology +import teuthology.lock +import teuthology.lock.ops +import teuthology.misc +import teuthology.orchestra.remote + +import logging + +doc = """ +usage: teuthology-update-inventory -h + teuthology-update-inventory [-v] [-m type] REMOTE [REMOTE ...] + +Update the given nodes' inventory information on the lock server + + + -h, --help show this help message and exit + -v, --verbose be more verbose + -m , --machine-type optionally specify a machine type when + submitting nodes for the first time + REMOTE hostnames of machines whose information to update + +""" + + +def main(): + args = docopt.docopt(doc) + if args['--verbose']: + teuthology.log.setLevel(logging.DEBUG) + + machine_type = args.get('--machine-type') + remotes = args.get('REMOTE') + for rem_name in remotes: + rem_name = teuthology.misc.canonicalize_hostname(rem_name) + remote = teuthology.orchestra.remote.Remote(rem_name) + remote.connect() + inventory_info = remote.inventory_info + if machine_type: + inventory_info['machine_type'] = machine_type + teuthology.lock.ops.update_inventory(inventory_info) diff --git a/scripts/updatekeys.py b/scripts/updatekeys.py new file mode 100644 index 0000000000..394ae32bb1 --- /dev/null +++ b/scripts/updatekeys.py @@ -0,0 +1,31 @@ +import docopt +import sys + +import teuthology.lock +import teuthology.lock.cli + +doc = """ +usage: teuthology-updatekeys -h + teuthology-updatekeys [-v] -t + teuthology-updatekeys [-v] ... + teuthology-updatekeys [-v] -a + +Update any hostkeys that have changed. You can list specific machines to run +on, or use -a to check all of them automatically. + +positional arguments: + MACHINES hosts to check for updated keys + +optional arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + -t , --targets + Input yaml containing targets to check + -a, --all Update hostkeys of all machines in the db +""" + + +def main(): + args = docopt.docopt(doc) + status = teuthology.lock.cli.updatekeys(args) + sys.exit(status) diff --git a/scripts/wait.py b/scripts/wait.py new file mode 100644 index 0000000000..6b2ff34a91 --- /dev/null +++ b/scripts/wait.py @@ -0,0 +1,31 @@ +import docopt +import sys + +import logging + +import teuthology +import teuthology.suite +from teuthology.config import config + +doc = """ +usage: teuthology-wait --help + teuthology-wait [-v] --run + +Wait until run is finished. Returns exit code 0 on success, otherwise 1. + +Miscellaneous arguments: + -h, --help Show this help message and exit + -v, --verbose Be more verbose + +Standard arguments: + -r, --run Run name to watch. 
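As with teuthology-updatekeys above, console entry points signal success or failure through an integer return value; an explicit sys.exit(), or the setuptools-generated wrapper, turns that into the process exit status. A generic sketch of the convention teuthology-wait relies on (the success check is a placeholder):

import sys

def main(argv=None):
    run_finished_ok = True  # placeholder for the real check
    return 0 if run_finished_ok else 1

if __name__ == '__main__':
    sys.exit(main())  # exit status 0 on success, 1 otherwise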
+""" + + +def main(argv=sys.argv[1:]): + args = docopt.docopt(doc, argv=argv) + if args.get('--verbose'): + teuthology.log.setLevel(logging.DEBUG) + name = args.get('--run') + return teuthology.suite.wait(name, config.max_job_time, None) + diff --git a/scripts/worker.py b/scripts/worker.py new file mode 100644 index 0000000000..a3e12c20d7 --- /dev/null +++ b/scripts/worker.py @@ -0,0 +1,37 @@ +import argparse + +import teuthology.worker + + +def main(): + teuthology.worker.main(parse_args()) + + +def parse_args(): + parser = argparse.ArgumentParser(description=""" +Grab jobs from a beanstalk queue and run the teuthology tests they +describe. One job is run at a time. +""") + parser.add_argument( + '-v', '--verbose', + action='store_true', default=None, + help='be more verbose', + ) + parser.add_argument( + '--archive-dir', + metavar='DIR', + help='path under which to archive results', + required=True, + ) + parser.add_argument( + '-l', '--log-dir', + help='path in which to store logs', + required=True, + ) + parser.add_argument( + '-t', '--tube', + help='which beanstalk tube to read jobs from', + required=True, + ) + + return parser.parse_args() diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..f5de27bf92 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,129 @@ +[metadata] +name = teuthology +long_description = file: README.rst +long_description_content_type = text/x-rst +url = https://github.com/ceph/teuthology +author = Red Hat, Inc. +license = MIT +license_file = LICENSE +classifiers = + Intended Audience :: Developers + License :: OSI Approved :: MIT License + Natural Language :: English + Operating System :: POSIX :: Linux + Programming Language :: Python :: 3 + Programming Language :: Python :: 3 :: Only + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: Implementation :: CPython + Topic :: Software Development :: Quality Assurance + Topic :: Software Development :: Testing + Topic :: System :: Distributed Computing + Topic :: System :: Filesystems +description_content_type = text/x-rst; charset=UTF-8 +description_file = README.rst +keywords = teuthology, test, ceph, cluster +summary = Ceph test framework + +[options] +packages = find: +install_requires = + PyYAML + ansible>=2.10,<3.0 + apache-libcloud + argparse>=1.2.1 + backports.ssl-match-hostname + beanstalkc3>=0.4.0 + configobj + configparser + docopt + gevent + httplib2 + humanfriendly + lupa + ndg-httpsclient + netaddr + paramiko + pexpect + pip-tools + prettytable + psutil>=2.1.0 + pyasn1 + pynacl>=1.5.0 + pyopenssl>=0.13 + python-dateutil + python-novaclient + python-openstackclient + requests>2.13.0 + sentry-sdk +python_requires = >=3.6 + +[options.entry_points] +console_scripts = + teuthology = scripts.run:main + teuthology-openstack = scripts.openstack:main + teuthology-nuke = scripts.nuke:main + teuthology-suite = scripts.suite:main + teuthology-ls = scripts.ls:main + teuthology-worker = scripts.worker:main + teuthology-lock = scripts.lock:main + teuthology-schedule = scripts.schedule:main + teuthology-updatekeys = scripts.updatekeys:main + teuthology-update-inventory = scripts.update_inventory:main + teuthology-results = scripts.results:main + teuthology-report = scripts.report:main + teuthology-kill = scripts.kill:main + teuthology-queue = scripts.queue:main + teuthology-prune-logs = scripts.prune_logs:main + 
teuthology-describe = scripts.describe:main + teuthology-reimage = scripts.reimage:main + teuthology-dispatcher = scripts.dispatcher:main + teuthology-wait = scripts.wait:main + +[options.extras_require] +manhole = + manhole +rocketchat = + rocket-python>=1.2.15 +sentry = + sentry-sdk +test = + PyJWT + boto>=2.0b4 + boto3 + cryptography>=2.7 + ipy + mock + nose + pytest + toml + tox + xmltodict + +[options.package_data] +teuthology.openstack = + archive-key + archive-key.pub + openstack-centos-6.5-user-data.txt + openstack-centos-7.0-user-data.txt + openstack-centos-7.1-user-data.txt + openstack-centos-7.2-user-data.txt + openstack-debian-8.0-user-data.txt + openstack-opensuse-42.1-user-data.txt + openstack-teuthology.cron + openstack-teuthology.init + openstack-ubuntu-12.04-user-data.txt + openstack-ubuntu-14.04-user-data.txt + openstack-user-data.txt + openstack.yaml + setup-openstack.sh +teuthology.suite = + fragment-merge.lua +teuthology.task.install = + adjust-ulimits + daemon-helper +teuthology.task.internal = + edit_sudoers.sh diff --git a/teuthology/__init__.py b/teuthology/__init__.py new file mode 100644 index 0000000000..4781f59bc1 --- /dev/null +++ b/teuthology/__init__.py @@ -0,0 +1,109 @@ +from __future__ import print_function +import os +try: + import importlib.metadata as importlib_metadata +except ImportError: + import importlib_metadata + +__version__ = importlib_metadata.version("teuthology") + +# Tell gevent not to patch os.waitpid() since it is susceptible to race +# conditions. See: +# http://www.gevent.org/gevent.monkey.html#gevent.monkey.patch_os +os.environ['GEVENT_NOWAITPID'] = 'true' + +# Use manhole to give us a way to debug hung processes +# https://pypi.python.org/pypi/manhole +try: + import manhole + manhole.install( + verbose=False, + # Listen for SIGUSR1 + oneshot_on="USR1" + ) +except ImportError: + pass +from gevent import monkey +monkey.patch_all( + dns=False, + # Don't patch subprocess to avoid http://tracker.ceph.com/issues/14990 + subprocess=False, +) +import sys +from gevent.hub import Hub + +# Don't write pyc files +sys.dont_write_bytecode = True + +from teuthology.orchestra import monkey +monkey.patch_all() + +import logging + +# If we are running inside a virtualenv, ensure we have its 'bin' directory in +# our PATH. This doesn't happen automatically if scripts are called without +# first activating the virtualenv. 
+exec_dir = os.path.abspath(os.path.dirname(sys.argv[0])) +if os.path.split(exec_dir)[-1] == 'bin' and exec_dir not in os.environ['PATH']: + os.environ['PATH'] = ':'.join((exec_dir, os.environ['PATH'])) + +# We don't need to see log entries for each connection opened +logging.getLogger('requests.packages.urllib3.connectionpool').setLevel( + logging.WARN) +# if requests doesn't bundle it, shut it up anyway +logging.getLogger('urllib3.connectionpool').setLevel( + logging.WARN) + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s') +log = logging.getLogger(__name__) + +log.debug('teuthology version: %s', __version__) + + +def setup_log_file(log_path): + root_logger = logging.getLogger() + handlers = root_logger.handlers + for handler in handlers: + if isinstance(handler, logging.FileHandler) and \ + handler.stream.name == log_path: + log.debug("Already logging to %s; not adding new handler", + log_path) + return + formatter = logging.Formatter( + fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s', + datefmt='%Y-%m-%dT%H:%M:%S') + handler = logging.FileHandler(filename=log_path) + handler.setFormatter(formatter) + root_logger.addHandler(handler) + root_logger.info('teuthology version: %s', __version__) + + +def install_except_hook(): + """ + Install an exception hook that first logs any uncaught exception, then + raises it. + """ + def log_exception(exc_type, exc_value, exc_traceback): + if not issubclass(exc_type, KeyboardInterrupt): + log.critical("Uncaught exception", exc_info=(exc_type, exc_value, + exc_traceback)) + sys.__excepthook__(exc_type, exc_value, exc_traceback) + sys.excepthook = log_exception + + +def patch_gevent_hub_error_handler(): + Hub._origin_handle_error = Hub.handle_error + + def custom_handle_error(self, context, type, value, tb): + if context is None or issubclass(type, Hub.SYSTEM_ERROR): + self.handle_system_error(type, value) + elif issubclass(type, Hub.NOT_ERROR): + pass + else: + log.error("Uncaught exception (Hub)", exc_info=(type, value, tb)) + + Hub.handle_error = custom_handle_error + +patch_gevent_hub_error_handler() diff --git a/teuthology/beanstalk.py b/teuthology/beanstalk.py new file mode 100644 index 0000000000..41e2acedd1 --- /dev/null +++ b/teuthology/beanstalk.py @@ -0,0 +1,215 @@ +import beanstalkc +import json +import yaml +import logging +import pprint +import sys +from collections import OrderedDict + +from teuthology.config import config +from teuthology import report + +log = logging.getLogger(__name__) + + +def connect(): + host = config.queue_host + port = config.queue_port + if host is None or port is None: + raise RuntimeError( + 'Beanstalk queue information not found in {conf_path}'.format( + conf_path=config.teuthology_yaml)) + return beanstalkc.Connection(host=host, port=port, parse_yaml=yaml.safe_load) + + +def watch_tube(connection, tube_name): + """ + Watch a given tube, potentially correcting to 'multi' if necessary. Returns + the tube_name that was actually used. 
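For orientation, a consumer-side sketch that uses only the beanstalkc calls appearing in this module; the host, port and tube name are placeholders:

import yaml
import beanstalkc

conn = beanstalkc.Connection(host='queue.example.com', port=11300,
                             parse_yaml=yaml.safe_load)
conn.watch('multi')            # read from the machine-type tube
conn.ignore('default')
job = conn.reserve(timeout=5)  # returns None if nothing is ready in time
if job is not None and job.body is not None:
    job_config = yaml.safe_load(job.body)
    print(job_config.get('name'))
conn.close()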
+ """ + if ',' in tube_name: + log.debug("Correcting tube name to 'multi'") + tube_name = 'multi' + connection.watch(tube_name) + connection.ignore('default') + return tube_name + + +def walk_jobs(connection, tube_name, processor, pattern=None): + """ + def callback(jobs_dict) + """ + log.info("Checking Beanstalk Queue...") + job_count = connection.stats_tube(tube_name)['current-jobs-ready'] + if job_count == 0: + log.info('No jobs in Beanstalk Queue') + return + + # Try to figure out a sane timeout based on how many jobs are in the queue + timeout = job_count / 2000.0 * 60 + for i in range(1, job_count + 1): + print_progress(i, job_count, "Loading") + job = connection.reserve(timeout=timeout) + if job is None or job.body is None: + continue + job_config = yaml.safe_load(job.body) + job_name = job_config['name'] + job_id = job.stats()['id'] + if pattern is not None and pattern not in job_name: + continue + processor.add_job(job_id, job_config, job) + end_progress() + processor.complete() + + +def print_progress(index, total, message=None): + msg = "{m} ".format(m=message) if message else '' + sys.stderr.write("{msg}{i}/{total}\r".format( + msg=msg, i=index, total=total)) + sys.stderr.flush() + + +def end_progress(): + sys.stderr.write('\n') + sys.stderr.flush() + + +class JobProcessor(object): + def __init__(self): + self.jobs = OrderedDict() + + def add_job(self, job_id, job_config, job_obj=None): + job_id = str(job_id) + + job_dict = dict( + index=(len(self.jobs) + 1), + job_config=job_config, + ) + if job_obj: + job_dict['job_obj'] = job_obj + self.jobs[job_id] = job_dict + + self.process_job(job_id) + + def process_job(self, job_id): + pass + + def complete(self): + pass + + +class JobPrinter(JobProcessor): + def __init__(self, show_desc=False, full=False): + super(JobPrinter, self).__init__() + self.show_desc = show_desc + self.full = full + + def process_job(self, job_id): + job_config = self.jobs[job_id]['job_config'] + job_index = self.jobs[job_id]['index'] + job_priority = job_config['priority'] + job_name = job_config['name'] + job_desc = job_config['description'] + print('Job: {i:>4} priority: {pri:>4} {job_name}/{job_id}'.format( + i=job_index, + pri=job_priority, + job_id=job_id, + job_name=job_name, + )) + if self.full: + pprint.pprint(job_config) + elif job_desc and self.show_desc: + for desc in job_desc.split(): + print('\t {}'.format(desc)) + + +class RunPrinter(JobProcessor): + def __init__(self): + super(RunPrinter, self).__init__() + self.runs = list() + + def process_job(self, job_id): + run = self.jobs[job_id]['job_config']['name'] + if run not in self.runs: + self.runs.append(run) + print(run) + + +class JobDeleter(JobProcessor): + def __init__(self, pattern): + self.pattern = pattern + super(JobDeleter, self).__init__() + + def add_job(self, job_id, job_config, job_obj=None): + job_name = job_config['name'] + if self.pattern in job_name: + super(JobDeleter, self).add_job(job_id, job_config, job_obj) + + def process_job(self, job_id): + job_config = self.jobs[job_id]['job_config'] + job_name = job_config['name'] + print('Deleting {job_name}/{job_id}'.format( + job_id=job_id, + job_name=job_name, + )) + job_obj = self.jobs[job_id].get('job_obj') + if job_obj: + job_obj.delete() + report.try_delete_jobs(job_name, job_id) + + +def pause_tube(connection, tube, duration): + duration = int(duration) + if not tube: + tubes = sorted(connection.tubes()) + else: + tubes = [tube] + + prefix = 'Unpausing' if duration == 0 else "Pausing for {dur}s" + templ = prefix + ": {tubes}" 
+ log.info(templ.format(dur=duration, tubes=tubes)) + for tube in tubes: + connection.pause_tube(tube, duration) + + +def stats_tube(connection, tube): + stats = connection.stats_tube(tube) + result = dict( + name=tube, + count=stats['current-jobs-ready'], + paused=(stats['pause'] != 0), + ) + return result + + +def main(args): + machine_type = args['--machine_type'] + status = args['--status'] + delete = args['--delete'] + runs = args['--runs'] + show_desc = args['--description'] + full = args['--full'] + pause_duration = args['--pause'] + try: + connection = connect() + if machine_type and not pause_duration: + # watch_tube needs to be run before we inspect individual jobs; + # it is not needed for pausing tubes + watch_tube(connection, machine_type) + if status: + print(json.dumps(stats_tube(connection, machine_type))) + elif pause_duration: + pause_tube(connection, machine_type, pause_duration) + elif delete: + walk_jobs(connection, machine_type, + JobDeleter(delete)) + elif runs: + walk_jobs(connection, machine_type, + RunPrinter()) + else: + walk_jobs(connection, machine_type, + JobPrinter(show_desc=show_desc, full=full)) + except KeyboardInterrupt: + log.info("Interrupted.") + finally: + connection.close() diff --git a/teuthology/ceph.conf.template b/teuthology/ceph.conf.template new file mode 100644 index 0000000000..bdf92863de --- /dev/null +++ b/teuthology/ceph.conf.template @@ -0,0 +1,101 @@ +# XXX +# +# DO NOT MODIFY THIS FILE +# +# This file is a legacy ceph.conf template used only when testing older +# releases of Ceph (pre-Nautilus). The new template exists in ceph.git at +# qa/tasks/ceph.conf.template +# +# XXX + +[global] + chdir = "" + pid file = /var/run/ceph/$cluster-$name.pid + auth supported = cephx + + filestore xattr use omap = true + + mon clock drift allowed = 1.000 + + osd crush chooseleaf type = 0 + auth debug = true + + ms die on old message = true + + mon pg warn min per osd = 1 + mon pg warn max per osd = 10000 # <= luminous + mon max pg per osd = 10000 # >= luminous + mon pg warn max object skew = 0 + + osd pool default size = 2 + + mon osd allow primary affinity = true + mon osd allow pg remap = true + mon warn on legacy crush tunables = false + mon warn on crush straw calc version zero = false + mon warn on no sortbitwise = false + mon warn on osd down out interval zero = false + + osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd" + + osd default data pool replay window = 5 + + mon allow pool delete = true + + mon cluster log file level = debug + debug asserts on shutdown = true + +[osd] + osd journal size = 100 + + osd scrub load threshold = 5.0 + osd scrub max interval = 600 + + osd recover clone overlap = true + osd recovery max chunk = 1048576 + + osd debug shutdown = true + osd debug op order = true + osd debug verify stray on activate = true + + osd open classes on start = true + osd debug pg log writeout = true + + osd deep scrub update digest min age = 30 + + osd map max advance = 10 + + journal zero on create = true + + filestore ondisk finisher threads = 3 + filestore apply finisher threads = 3 + + bdev debug aio = true + osd debug misdirected ops = true + +[mgr] + debug ms = 1 + debug mgr = 20 + debug mon = 20 + debug auth = 20 + mon reweight min pgs per osd = 4 + mon reweight min bytes per osd = 10 + +[mon] + debug ms = 1 + debug mon = 20 + debug paxos = 20 + debug auth = 20 + mon data avail warn = 5 + mon mgr mkfs grace = 120 + mon reweight min pgs per 
osd = 4 + mon osd reporter subtree level = osd + mon osd prime pg temp = true + mon reweight min bytes per osd = 10 + +[client] + rgw cache enabled = true + rgw enable ops log = true + rgw enable usage log = true + log file = /var/log/ceph/$cluster-$name.$pid.log + admin socket = /var/run/ceph/$cluster-$name.$pid.asok diff --git a/teuthology/config.py b/teuthology/config.py new file mode 100644 index 0000000000..6da6cdd7f1 --- /dev/null +++ b/teuthology/config.py @@ -0,0 +1,288 @@ +import os +import yaml +import logging +try: + from collections.abc import MutableMapping +except ImportError: + from collections import MutableMapping + + +def init_logging(): + log = logging.getLogger(__name__) + return log + +log = init_logging() + + +class YamlConfig(MutableMapping): + """ + A configuration object populated by parsing a yaml file, with optional + default values. + + Note that modifying the _defaults attribute of an instance can potentially + yield confusing results; if you need to do modify defaults, use the class + variable or create a subclass. + """ + _defaults = dict() + + def __init__(self, yaml_path=None): + self.yaml_path = yaml_path + if self.yaml_path: + self.load() + else: + self._conf = dict() + + def load(self, conf=None): + if conf: + if isinstance(conf, dict): + self._conf = conf + else: + self._conf = yaml.safe_load(conf) + return + if os.path.exists(self.yaml_path): + with open(self.yaml_path) as f: + self._conf = yaml.safe_load(f) + else: + log.debug("%s not found", self.yaml_path) + self._conf = dict() + + def update(self, in_dict): + """ + Update an existing configuration using dict.update() + + :param in_dict: The dict to use to update + """ + self._conf.update(in_dict) + + @classmethod + def from_dict(cls, in_dict): + """ + Build a config object from a dict. + + :param in_dict: The dict to use + :returns: The config object + """ + conf_obj = cls() + conf_obj._conf = in_dict + return conf_obj + + def to_dict(self): + """ + :returns: A shallow copy of the configuration as a dict + """ + return dict(self._conf) + + @classmethod + def from_str(cls, in_str): + """ + Build a config object from a string or yaml stream. + + :param in_str: The stream or string + :returns: The config object + """ + conf_obj = cls() + conf_obj._conf = yaml.safe_load(in_str) + return conf_obj + + def to_str(self): + """ + :returns: str(self) + """ + return str(self) + + def get(self, key, default=None): + return self._conf.get(key, default) + + def __str__(self): + return yaml.safe_dump(self._conf, default_flow_style=False).strip() + + def __repr__(self): + return self.__str__() + + def __getitem__(self, name): + return self.__getattr__(name) + + def __getattr__(self, name): + return self._conf.get(name, self._defaults.get(name)) + + def __contains__(self, name): + return self._conf.__contains__(name) + + def __setattr__(self, name, value): + if name.endswith('_conf') or name in ('yaml_path'): + object.__setattr__(self, name, value) + else: + self._conf[name] = value + + def __delattr__(self, name): + del self._conf[name] + + def __len__(self): + return self._conf.__len__() + + def __iter__(self): + return self._conf.__iter__() + + def __setitem__(self, name, value): + self._conf.__setitem__(name, value) + + def __delitem__(self, name): + self._conf.__delitem__(name) + + +class TeuthologyConfig(YamlConfig): + """ + This class is intended to unify teuthology's many configuration files and + objects. Currently it serves as a convenient interface to + ~/.teuthology.yaml and nothing else. 
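A usage sketch for the YamlConfig base class defined above (the values are invented, and the import assumes this patch is applied): unknown keys fall back to the class-level _defaults, and attribute and item access are interchangeable.

from teuthology.config import YamlConfig

class MyConfig(YamlConfig):
    _defaults = {'retries': 3}

c = MyConfig.from_dict({'archive_base': '/tmp/archive'})
print(c.archive_base)     # '/tmp/archive'
print(c['archive_base'])  # same value, via __getitem__
print(c.retries)          # 3, taken from _defaults
c.update({'retries': 5})
print(c.retries)          # 5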
+ """ + yaml_path = os.path.join(os.path.expanduser('~/.teuthology.yaml')) + _defaults = { + 'archive_base': '/home/teuthworker/archive', + 'archive_upload': None, + 'archive_upload_key': None, + 'archive_upload_url': None, + 'automated_scheduling': False, + 'reserve_machines': 5, + 'ceph_git_base_url': 'https://github.com/ceph/', + 'ceph_git_url': None, + 'ceph_qa_suite_git_url': None, + 'ceph_cm_ansible_git_url': None, + 'use_conserver': False, + 'conserver_master': 'conserver.front.sepia.ceph.com', + 'conserver_port': 3109, + 'gitbuilder_host': 'gitbuilder.ceph.com', + 'githelper_base_url': 'http://git.ceph.com:8080', + 'check_package_signatures': True, + 'job_threshold': 500, + 'lab_domain': 'front.sepia.ceph.com', + 'lock_server': 'http://paddles.front.sepia.ceph.com/', + 'max_job_time': 259200, # 3 days + 'nsupdate_url': 'http://nsupdate.front.sepia.ceph.com/update', + 'results_server': 'http://paddles.front.sepia.ceph.com/', + 'results_ui_server': 'http://pulpito.ceph.com/', + 'results_sending_email': 'teuthology', + 'results_timeout': 43200, + 'src_base_path': os.path.expanduser('~/src'), + 'verify_host_keys': True, + 'watchdog_interval': 120, + 'kojihub_url': 'http://koji.fedoraproject.org/kojihub', + 'kojiroot_url': 'http://kojipkgs.fedoraproject.org/packages', + 'koji_task_url': 'https://kojipkgs.fedoraproject.org/work/', + 'baseurl_template': 'http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri}', + 'use_shaman': True, + 'shaman_host': 'shaman.ceph.com', + 'teuthology_path': None, + 'suite_verify_ceph_hash': True, + 'suite_allow_missing_packages': False, + 'openstack': { + 'clone': 'git clone http://github.com/ceph/teuthology', + 'user-data': 'teuthology/openstack/openstack-{os_type}-{os_version}-user-data.txt', + 'ip': '1.1.1.1', + 'machine': { + 'disk': 20, + 'ram': 8000, + 'cpus': 1, + }, + 'volumes': { + 'count': 0, + 'size': 1, + }, + }, + 'rocketchat': None, + 'sleep_before_teardown': 0, + } + + def __init__(self, yaml_path=None): + super(TeuthologyConfig, self).__init__(yaml_path or self.yaml_path) + + def get_ceph_cm_ansible_git_url(self): + return (self.ceph_cm_ansible_git_url or + self.ceph_git_base_url + 'ceph-cm-ansible.git') + + def get_ceph_qa_suite_git_url(self): + return (self.ceph_qa_suite_git_url or + self.get_ceph_git_url()) + + def get_ceph_git_url(self): + return (self.ceph_git_url or + self.ceph_git_base_url + 'ceph-ci.git') + + +class JobConfig(YamlConfig): + pass + + +class FakeNamespace(YamlConfig): + """ + This class is meant to behave like a argparse Namespace + + We'll use this as a stop-gap as we refactor commands but allow the tasks + to still be passed a single namespace object for the time being. + """ + def __init__(self, config_dict=None): + if not config_dict: + config_dict = dict() + self._conf = self._clean_config(config_dict) + set_config_attr(self) + + def _clean_config(self, config_dict): + """ + Makes sure that the keys of config_dict are able to be used. For + example the "--" prefix of a docopt dict isn't valid and won't populate + correctly. + """ + result = dict() + for key, value in config_dict.items(): + new_key = key + if new_key.startswith("--"): + new_key = new_key[2:] + elif new_key.startswith("<") and new_key.endswith(">"): + new_key = new_key[1:-1] + + if "-" in new_key: + new_key = new_key.replace("-", "_") + + result[new_key] = value + + return result + + def __getattr__(self, name): + """ + We need to modify this for FakeNamespace so that getattr() will + work correctly on a FakeNamespace instance. 
+ """ + if name in self._conf: + return self._conf[name] + elif name in self._defaults: + return self._defaults[name] + raise AttributeError(name) + + def __setattr__(self, name, value): + if name == 'teuthology_config': + object.__setattr__(self, name, value) + else: + super(FakeNamespace, self).__setattr__(name, value) + + def __repr__(self): + return repr(self._conf) + + def __str__(self): + return str(self._conf) + + +def set_config_attr(obj): + """ + Set obj.teuthology_config, mimicking the old behavior of misc.read_config + """ + obj.teuthology_config = config + + +def _get_config_path(): + system_config_path = '/etc/teuthology.yaml' + if not os.path.exists(TeuthologyConfig.yaml_path) and \ + os.path.exists(system_config_path): + return system_config_path + return TeuthologyConfig.yaml_path + +config = TeuthologyConfig(yaml_path=_get_config_path()) diff --git a/teuthology/contextutil.py b/teuthology/contextutil.py new file mode 100644 index 0000000000..57a0f08c86 --- /dev/null +++ b/teuthology/contextutil.py @@ -0,0 +1,145 @@ +import contextlib +import sys +import logging +import time +import itertools + +from teuthology.config import config +from teuthology.exceptions import MaxWhileTries + + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def nested(*managers): + """ + Like contextlib.nested but takes callables returning context + managers, to avoid the major reason why contextlib.nested was + deprecated. + + This version also logs any exceptions early, much like run_tasks, + to ease debugging. TODO combine nested and run_tasks. + """ + exits = [] + vars = [] + exc = (None, None, None) + try: + for mgr_fn in managers: + mgr = mgr_fn() + exit = mgr.__exit__ + enter = mgr.__enter__ + vars.append(enter()) + exits.append(exit) + yield vars + except Exception: + log.exception('Saw exception from nested tasks') + exc = sys.exc_info() + # FIXME this needs to be more generic + if config.ctx and config.ctx.config.get('interactive-on-error'): + config.ctx.config['interactive-on-error'] = False + from teuthology.task import interactive + log.warning('Saw failure, going into interactive mode...') + interactive.task(ctx=config.ctx, config=None) + finally: + while exits: + exit = exits.pop() + try: + if exit(*exc): + exc = (None, None, None) + except Exception: + exc = sys.exc_info() + if exc != (None, None, None): + # Don't rely on sys.exc_info() still containing + # the right information. Another exception may + # have been raised and caught by an exit method + raise exc[1] + + +class safe_while(object): + """ + A context manager to remove boiler plate code that deals with `while` loops + that need a given number of tries and some seconds to sleep between each + one of those tries. + + The most simple example possible will try 10 times sleeping for 6 seconds: + + >>> from teuthology.contexutil import safe_while + >>> with safe_while() as proceed: + ... while proceed(): + ... # repetitive code here + ... print("hello world") + ... + Traceback (most recent call last): + ... + MaxWhileTries: reached maximum tries (5) after waiting for 75 seconds + + Yes, this adds yet another level of indentation but it allows you to + implement while loops exactly the same as before with just 1 more + indentation level and one extra call. Everything else stays the same, + code-wise. So adding this helper to existing code is simpler. + + :param sleep: The amount of time to sleep between tries. Default 6 + :param increment: The amount to add to the sleep value on each try. + Default 0. 
+ :param tries: The amount of tries before giving up. Default 10. + :param action: The name of the action being attempted. Default none. + :param _raise: Whether to raise an exception (or log a warning). + Default True. + :param _sleeper: The function to use to sleep. Only used for testing. + Default time.sleep + """ + + def __init__(self, sleep=6, increment=0, tries=10, action=None, + _raise=True, _sleeper=None): + self.sleep = sleep + self.increment = increment + self.tries = tries + self.counter = 0 + self.sleep_current = sleep + self.action = action + self._raise = _raise + self.sleeper = _sleeper or time.sleep + + def _make_error_msg(self): + """ + Sum the total number of seconds we waited while providing the number + of tries we attempted + """ + total_seconds_waiting = sum( + itertools.islice( + itertools.count(self.sleep, self.increment), + self.tries + ) + ) + msg = 'reached maximum tries ({tries})' + \ + ' after waiting for {total} seconds' + if self.action: + msg = "'{action}' " + msg + + msg = msg.format( + action=self.action, + tries=self.tries, + total=total_seconds_waiting, + ) + return msg + + def __call__(self): + self.counter += 1 + if self.counter == 1: + return True + if self.counter > self.tries: + error_msg = self._make_error_msg() + if self._raise: + raise MaxWhileTries(error_msg) + else: + log.warning(error_msg) + return False + self.sleeper(self.sleep_current) + self.sleep_current += self.increment + return True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False diff --git a/teuthology/describe_tests.py b/teuthology/describe_tests.py new file mode 100644 index 0000000000..3ea7d71b6c --- /dev/null +++ b/teuthology/describe_tests.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- + +import csv +import json +from prettytable import PrettyTable, FRAME, ALL +import os +import sys +import yaml + +import random +from distutils.util import strtobool + +from teuthology.exceptions import ParseError +from teuthology.suite.build_matrix import \ + build_matrix, generate_combinations, _get_matrix +from teuthology.suite import util, merge + +def main(args): + try: + describe_tests(args) + except ParseError: + sys.exit(1) + + +def describe_tests(args): + suite_dir = os.path.abspath(args[""]) + output_format = args['--format'] + + conf=dict() + rename_args = { + 'filter': 'filter_in', + } + for (key, value) in args.items(): + key = key.lstrip('--').replace('-', '_') + key = rename_args.get(key) or key + if key in ('filter_all', 'filter_in', 'filter_out', 'fields'): + if not value: + value = [] + else: + value = [_ for _ in + (x.strip() for x in value.split(',')) if _] + elif key in ('limit'): + value = int(value) + elif key in ('seed'): + value = int(value) + if value < 0: + value = None + elif key == 'subset' and value is not None: + # take input string '2/3' and turn into (2, 3) + value = tuple(map(int, value.split('/'))) + elif key in ('show_facet'): + value = strtobool(value) + conf[key] = value + + if args['--combinations']: + headers, rows = get_combinations(suite_dir, + limit=conf['limit'], + seed=conf['seed'], + subset=conf['subset'], + no_nested_subset=conf['no_nested_subset'], + fields=conf['fields'], + filter_in=conf['filter_in'], + filter_out=conf['filter_out'], + filter_all=conf['filter_all'], + filter_fragments=conf['filter_fragments'], + include_facet=conf['show_facet']) + hrule = ALL + elif args['--summary']: + output_summary(suite_dir, + limit=conf['limit'], + seed=conf['seed'], + subset=conf['subset'], + 
no_nested_subset=conf['no_nested_subset'], + show_desc=conf['print_description'], + show_frag=conf['print_fragments'], + filter_in=conf['filter_in'], + filter_out=conf['filter_out'], + filter_all=conf['filter_all'], + filter_fragments=conf['filter_fragments']) + exit(0) + else: + headers, rows = describe_suite(suite_dir, conf['fields'], conf['show_facet'], + output_format) + hrule = FRAME + + output_results(headers, rows, output_format, hrule) + + +def output_results(headers, rows, output_format, hrule): + """ + Write the headers and rows given in the specified output format to + stdout. + """ + if output_format == 'json': + objects = [{k: v for k, v in zip(headers, row) if v} + for row in rows] + print(json.dumps(dict(headers=headers, data=objects))) + elif output_format == 'csv': + writer = csv.writer(sys.stdout) + writer.writerows([headers] + rows) + else: + table = PrettyTable(headers) + table.align = 'l' + table.vrules = ALL + table.hrules = hrule + for row in rows: + table.add_row(row) + print(table) + + +def output_summary(path, limit=0, + seed=None, + subset=None, + no_nested_subset=None, + show_desc=True, + show_frag=False, + show_matrix=False, + filter_in=None, + filter_out=None, + filter_all=None, + filter_fragments=True): + """ + Prints number of all facets for a given suite for inspection, + taking into accout such options like --subset, --filter, + --filter-out and --filter-all. Optionally dumps matrix objects, + yaml files which is used for generating combinations. + """ + + random.seed(seed) + mat, first, matlimit = _get_matrix(path, subset=subset, no_nested_subset=no_nested_subset) + configs = generate_combinations(path, mat, first, matlimit) + count = 0 + total = len(configs) + suite = os.path.basename(path) + configs = merge.config_merge(configs, + suite_name=suite, + filter_in=filter_in, + filter_out=filter_out, + filter_all=filter_all, + filter_fragments=filter_fragments) + for c in configs: + if limit and count >= limit: + break + count += 1 + if show_desc or show_frag: + print("{}".format(c[0])) + if show_frag: + for path in c[1]: + print(" {}".format(util.strip_fragment_path(path))) + if show_matrix: + print(mat.tostr(1)) + print("# {}/{} {}".format(count, total, path)) + +def get_combinations(suite_dir, + limit=0, + seed=None, + subset=None, + no_nested_subset=False, + fields=[], + filter_in=None, + filter_out=None, + filter_all=None, + filter_fragments=False, + include_facet=True): + """ + Describes the combinations of a suite, optionally limiting + or filtering output based on the given parameters. Includes + columns for the subsuite and facets when include_facet is True. + + Returns a tuple of (headers, rows) where both elements are lists + of strings. 
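A quick sketch of the three output modes handled by output_results() above; the headers and rows are invented:

from prettytable import ALL
from teuthology.describe_tests import output_results

headers = ['path', 'facet']
rows = [['suites/demo/basic.yaml', 'basic']]

output_results(headers, rows, 'json', ALL)   # one JSON document: {"headers": [...], "data": [...]}
output_results(headers, rows, 'csv', ALL)    # header row followed by CSV rows
output_results(headers, rows, 'plain', ALL)  # PrettyTable, left-aligned, ruled per hrule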
+ """ + suite = os.path.basename(suite_dir) + configs = build_matrix(suite_dir, subset=subset, no_nested_subset=no_nested_subset, seed=seed) + + num_listed = 0 + rows = [] + + facet_headers = set() + dirs = {} + max_dir_depth = 0 + + configs = merge.config_merge(configs, + suite_name=suite, + filter_in=filter_in, + filter_out=filter_out, + filter_all=filter_all, + filter_fragments=filter_fragments) + for _, fragment_paths, __ in configs: + if limit > 0 and num_listed >= limit: + break + + fragment_fields = [extract_info(path, fields) + for path in fragment_paths] + + # merge fields from multiple fragments by joining their values with \n + metadata = {} + for fragment_meta in fragment_fields: + for field, value in fragment_meta.items(): + if value == '': + continue + if field in metadata: + metadata[field] += '\n' + str(value) + else: + metadata[field] = str(value) + + if include_facet: + # map final dir (facet) -> filename without the .yaml suffix + for path in fragment_paths: + facet_dir = os.path.dirname(path) + facet = os.path.basename(facet_dir) + metadata[facet] = os.path.basename(path)[:-5] + facet_headers.add(facet) + facet_dirs = facet_dir.split('/')[:-1] + for i, dir_ in enumerate(facet_dirs): + if i not in dirs: + dirs[i] = set() + dirs[i].add(dir_) + metadata['_dir_' + str(i)] = os.path.basename(dir_) + max_dir_depth = max(max_dir_depth, i) + + rows.append(metadata) + num_listed += 1 + + subsuite_headers = [] + if include_facet: + first_subsuite_depth = max_dir_depth + for i in range(max_dir_depth): + if len(dirs[i]) > 1: + first_subsuite_depth = i + break + + subsuite_headers = ['subsuite depth ' + str(i) + for i in + range(0, max_dir_depth - first_subsuite_depth + 1)] + + for row in rows: + for i in range(first_subsuite_depth, max_dir_depth + 1): + row[subsuite_headers[i - first_subsuite_depth]] = \ + row.get('_dir_' + str(i), '') + + headers = subsuite_headers + sorted(facet_headers) + fields + return headers, sorted([[row.get(field, '') for field in headers] + for row in rows]) + + +def describe_suite(suite_dir, fields, include_facet, output_format): + """ + Describe a suite listing each subdirectory and file once as a + separate row. + + Returns a tuple of (headers, rows) where both elements are lists + of strings. + + """ + rows = tree_with_info(suite_dir, fields, include_facet, '', [], + output_format=output_format) + + headers = ['path'] + if include_facet: + headers.append('facet') + return headers + fields, rows + + +def extract_info(file_name, fields): + """ + Read a yaml file and return a dictionary mapping the fields to the + values of those fields in the file. + + The returned dictionary will always contain all the provided + fields, mapping any non-existent ones to ''. + + Assumes fields are set in a format of: + + {'meta': [{'field' : value, 'field2' : value2}] + + or in yaml: + + meta: + - field: value + field2: value2 + + If 'meta' is present but not in this format, prints an error + message and raises ParseError. 
+ """ + empty_result = {f: '' for f in fields} + if os.path.isdir(file_name) or not file_name.endswith('.yaml'): + return empty_result + + with open(file_name, 'r') as f: + parsed = yaml.safe_load(f) + + if not isinstance(parsed, dict): + return empty_result + + meta = parsed.get('meta', [{}]) + if not (isinstance(meta, list) and + len(meta) == 1 and + isinstance(meta[0], dict)): + print('Error in meta format in %s' % file_name) + print('Meta must be a list containing exactly one dict.') + print('Meta is: %s' % meta) + raise ParseError() + + return {field: meta[0].get(field, '') for field in fields} + + +def path_relative_to_suites(path): + """ + Attempt to trim the ceph-qa-suite root directory from the beginning + of a path. + """ + try: + root = os.path.join('ceph-qa-suite', 'suites') + return path[path.index(root) + len(root):] + except ValueError: + return path + + +def tree_with_info(cur_dir, fields, include_facet, prefix, rows, + output_format='plain'): + """ + Gather fields from all files and directories in cur_dir. + Returns a list of strings for each path containing: + + 1) the path relative to ceph-qa-suite/suites (or the basename with + a /usr/bin/tree-like prefix if output_format is plain) + 2) the facet containing the path (if include_facet is True) + 3) the values of the provided fields in the path ('' is used for + missing values) in the same order as the provided fields + """ + files = sorted(os.listdir(cur_dir)) + has_yamls = any([x.endswith('.yaml') for x in files]) + facet = os.path.basename(cur_dir) if has_yamls else '' + for i, f in enumerate(files): + # skip any hidden files + if f.startswith('.'): + continue + path = os.path.join(cur_dir, f) + if i == len(files) - 1: + file_pad = '└── ' + dir_pad = ' ' + else: + file_pad = '├── ' + dir_pad = '│ ' + info = extract_info(path, fields) + tree_node = prefix + file_pad + f + if output_format != 'plain': + tree_node = path_relative_to_suites(path) + meta = [info[f] for f in fields] + row = [tree_node] + if include_facet: + row.append(facet) + rows.append(row + meta) + if os.path.isdir(path): + tree_with_info(path, fields, include_facet, + prefix + dir_pad, rows, output_format) + return rows diff --git a/teuthology/dispatcher/__init__.py b/teuthology/dispatcher/__init__.py new file mode 100644 index 0000000000..14218835b1 --- /dev/null +++ b/teuthology/dispatcher/__init__.py @@ -0,0 +1,202 @@ +import logging +import os +import subprocess +import sys +import yaml + +from datetime import datetime + +from teuthology import setup_log_file, install_except_hook +from teuthology import beanstalk +from teuthology import report +from teuthology.config import config as teuth_config +from teuthology.exceptions import SkipJob +from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology +from teuthology.lock.ops import block_and_lock_machines +from teuthology.dispatcher import supervisor +from teuthology.worker import prep_job +from teuthology import safepath +from teuthology.nuke import nuke + +log = logging.getLogger(__name__) +start_time = datetime.utcnow() +restart_file_path = '/tmp/teuthology-restart-dispatcher' +stop_file_path = '/tmp/teuthology-stop-dispatcher' + + +def sentinel(path): + if not os.path.exists(path): + return False + file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path)) + return file_mtime > start_time + + +def restart(): + log.info('Restarting...') + args = sys.argv[:] + args.insert(0, sys.executable) + os.execv(sys.executable, args) + + +def stop(): + log.info('Stopping...') + sys.exit(0) + + +def 
load_config(archive_dir=None): + teuth_config.load() + if archive_dir is not None: + if not os.path.isdir(archive_dir): + sys.exit("{prog}: archive directory must exist: {path}".format( + prog=os.path.basename(sys.argv[0]), + path=archive_dir, + )) + else: + teuth_config.archive_base = archive_dir + + +def main(args): + # run dispatcher in job supervisor mode if --supervisor passed + if args["--supervisor"]: + return supervisor.main(args) + + verbose = args["--verbose"] + tube = args["--tube"] + log_dir = args["--log-dir"] + archive_dir = args["--archive-dir"] + exit_on_empty_queue = args["--exit-on-empty-queue"] + + if archive_dir is None: + archive_dir = teuth_config.archive_base + + # setup logging for disoatcher in {log_dir} + loglevel = logging.INFO + if verbose: + loglevel = logging.DEBUG + log.setLevel(loglevel) + log_file_path = os.path.join(log_dir, f"dispatcher.{tube}.{os.getpid()}") + setup_log_file(log_file_path) + install_except_hook() + + load_config(archive_dir=archive_dir) + + connection = beanstalk.connect() + beanstalk.watch_tube(connection, tube) + result_proc = None + + if teuth_config.teuthology_path is None: + fetch_teuthology('main') + fetch_qa_suite('main') + + keep_running = True + job_procs = set() + while keep_running: + # Check to see if we have a teuthology-results process hanging around + # and if so, read its return code so that it can exit. + if result_proc is not None and result_proc.poll() is not None: + log.debug("teuthology-results exited with code: %s", + result_proc.returncode) + result_proc = None + + if sentinel(restart_file_path): + restart() + elif sentinel(stop_file_path): + stop() + + load_config() + job_procs = set(filter(lambda p: p.poll() is None, job_procs)) + job = connection.reserve(timeout=60) + if job is None: + if exit_on_empty_queue and not job_procs: + log.info("Queue is empty and no supervisor processes running; exiting!") + break + continue + + # bury the job so it won't be re-run if it fails + job.bury() + job_id = job.jid + log.info('Reserved job %d', job_id) + log.info('Config is: %s', job.body) + job_config = yaml.safe_load(job.body) + job_config['job_id'] = str(job_id) + + if job_config.get('stop_worker'): + keep_running = False + + try: + job_config, teuth_bin_path = prep_job( + job_config, + log_file_path, + archive_dir, + ) + except SkipJob: + continue + + # lock machines but do not reimage them + if 'roles' in job_config: + job_config = lock_machines(job_config) + + run_args = [ + os.path.join(teuth_bin_path, 'teuthology-dispatcher'), + '--supervisor', + '-v', + '--bin-path', teuth_bin_path, + '--archive-dir', archive_dir, + ] + + # Create run archive directory if not already created and + # job's archive directory + create_job_archive(job_config['name'], + job_config['archive_path'], + archive_dir) + job_config_path = os.path.join(job_config['archive_path'], 'orig.config.yaml') + + # Write initial job config in job archive dir + with open(job_config_path, 'w') as f: + yaml.safe_dump(job_config, f, default_flow_style=False) + + run_args.extend(["--job-config", job_config_path]) + + try: + job_proc = subprocess.Popen(run_args) + job_procs.add(job_proc) + log.info('Job supervisor PID: %s', job_proc.pid) + except Exception: + error_message = "Saw error while trying to spawn supervisor." 
+ log.exception(error_message) + if 'targets' in job_config: + nuke(supervisor.create_fake_context(job_config), True) + report.try_push_job_info(job_config, dict( + status='fail', + failure_reason=error_message)) + + # This try/except block is to keep the worker from dying when + # beanstalkc throws a SocketError + try: + job.delete() + except Exception: + log.exception("Saw exception while trying to delete job") + + returncodes = set([0]) + for proc in job_procs: + if proc.returncode is not None: + returncodes.add(proc.returncode) + return max(returncodes) + + +def lock_machines(job_config): + report.try_push_job_info(job_config, dict(status='running')) + fake_ctx = supervisor.create_fake_context(job_config, block=True) + block_and_lock_machines(fake_ctx, len(job_config['roles']), + job_config['machine_type'], reimage=False) + job_config = fake_ctx.config + return job_config + + +def create_job_archive(job_name, job_archive_path, archive_dir): + log.info('Creating job\'s archive dir %s', job_archive_path) + safe_archive = safepath.munge(job_name) + run_archive = os.path.join(archive_dir, safe_archive) + if not os.path.exists(run_archive): + safepath.makedirs('/', run_archive) + safepath.makedirs('/', job_archive_path) diff --git a/teuthology/dispatcher/supervisor.py b/teuthology/dispatcher/supervisor.py new file mode 100644 index 0000000000..d7a695475d --- /dev/null +++ b/teuthology/dispatcher/supervisor.py @@ -0,0 +1,351 @@ +import logging +import os +import subprocess +import time +import yaml +import requests + +from urllib.parse import urljoin +from datetime import datetime + +import teuthology +from teuthology import report +from teuthology import safepath +from teuthology.config import config as teuth_config +from teuthology.exceptions import SkipJob +from teuthology import setup_log_file, install_except_hook +from teuthology.lock.ops import reimage_machines +from teuthology.misc import get_user, archive_logs, compress_logs +from teuthology.config import FakeNamespace +from teuthology.job_status import get_status +from teuthology.nuke import nuke +from teuthology.kill import kill_job +from teuthology.task.internal import add_remotes +from teuthology.misc import decanonicalize_hostname as shortname +from teuthology.lock import query + +log = logging.getLogger(__name__) + + +def main(args): + + verbose = args["--verbose"] + archive_dir = args["--archive-dir"] + teuth_bin_path = args["--bin-path"] + config_file_path = args["--job-config"] + + with open(config_file_path, 'r') as config_file: + job_config = yaml.safe_load(config_file) + + loglevel = logging.INFO + if verbose: + loglevel = logging.DEBUG + log.setLevel(loglevel) + + log_file_path = os.path.join(job_config['archive_path'], + f"supervisor.{job_config['job_id']}.log") + setup_log_file(log_file_path) + install_except_hook() + + # reimage target machines before running the job + if 'targets' in job_config: + reimage(job_config) + with open(config_file_path, 'w') as f: + yaml.safe_dump(job_config, f, default_flow_style=False) + + try: + return run_job( + job_config, + teuth_bin_path, + archive_dir, + verbose + ) + except SkipJob: + return 0 + + +def run_job(job_config, teuth_bin_path, archive_dir, verbose): + safe_archive = safepath.munge(job_config['name']) + if job_config.get('first_in_suite') or job_config.get('last_in_suite'): + job_archive = os.path.join(archive_dir, safe_archive) + args = [ + os.path.join(teuth_bin_path, 'teuthology-results'), + '--archive-dir', job_archive, + '--name', job_config['name'], + ] + if 
job_config.get('first_in_suite'): + log.info('Generating memo for %s', job_config['name']) + if job_config.get('seed'): + args.extend(['--seed', job_config['seed']]) + if job_config.get('subset'): + args.extend(['--subset', job_config['subset']]) + if job_config.get('no_nested_subset'): + args.extend(['--no-nested-subset']) + else: + log.info('Generating results for %s', job_config['name']) + timeout = job_config.get('results_timeout', + teuth_config.results_timeout) + args.extend(['--timeout', str(timeout)]) + if job_config.get('email'): + args.extend(['--email', job_config['email']]) + # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to + # make sure that it will continue to run if this worker process + # dies (e.g. because of a restart) + result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp) + log.info("teuthology-results PID: %s", result_proc.pid) + # Remove unnecessary logs for first and last jobs in run + log.info('Deleting job\'s archive dir %s', job_config['archive_path']) + for f in os.listdir(job_config['archive_path']): + os.remove(os.path.join(job_config['archive_path'], f)) + os.rmdir(job_config['archive_path']) + return + + log.info('Running job %s', job_config['job_id']) + + arg = [ + os.path.join(teuth_bin_path, 'teuthology'), + ] + # The following is for compatibility with older schedulers, from before we + # started merging the contents of job_config['config'] into job_config + # itself. + if 'config' in job_config: + inner_config = job_config.pop('config') + if not isinstance(inner_config, dict): + log.warning("run_job: job_config['config'] isn't a dict, it's a %s", + str(type(inner_config))) + else: + job_config.update(inner_config) + + if verbose or job_config['verbose']: + arg.append('-v') + + arg.extend([ + '--owner', job_config['owner'], + '--archive', job_config['archive_path'], + '--name', job_config['name'], + ]) + if job_config['description'] is not None: + arg.extend(['--description', job_config['description']]) + job_archive = os.path.join(job_config['archive_path'], 'orig.config.yaml') + arg.extend(['--', job_archive]) + + log.debug("Running: %s" % ' '.join(arg)) + p = subprocess.Popen(args=arg) + log.info("Job archive: %s", job_config['archive_path']) + log.info("Job PID: %s", str(p.pid)) + + if teuth_config.results_server: + log.info("Running with watchdog") + try: + run_with_watchdog(p, job_config) + except Exception: + log.exception("run_with_watchdog had an unhandled exception") + raise + else: + log.info("Running without watchdog") + # This sleep() is to give the child time to start up and create the + # archive dir. 
+ time.sleep(5) + p.wait() + + if p.returncode != 0: + log.error('Child exited with code %d', p.returncode) + else: + log.info('Success!') + if 'targets' in job_config: + unlock_targets(job_config) + return p.returncode + +def failure_is_reimage(failure_reason): + if not failure_reason: + return False + reimage_failure = "Error reimaging machines:" + if reimage_failure in failure_reason: + return True + else: + return False + +def check_for_reimage_failures_and_mark_down(targets, count=10): + # Grab paddles history of jobs in the machine + # and count the number of reimaging errors + # if it fails N times then mark the machine down + base_url = teuth_config.results_server + for k, _ in targets.items(): + machine = k.split('@')[-1] + url = urljoin( + base_url, + '/nodes/{0}/jobs/?count={1}'.format( + machine, count) + ) + resp = requests.get(url) + jobs = resp.json() + if len(jobs) < count: + continue + reimage_failures = list(filter( + lambda j: failure_is_reimage(j['failure_reason']), + jobs + )) + if len(reimage_failures) < count: + continue + # Mark machine down + machine_name = shortname(k) + teuthology.lock.ops.update_lock( + machine_name, + description='reimage failed {0} times'.format(count), + status='down', + ) + log.error( + 'Reimage failed {0} times ... marking machine down'.format(count) + ) + +def reimage(job_config): + # Reimage the targets specified in job config + # and update their keys in config after reimaging + ctx = create_fake_context(job_config) + # change the status during the reimaging process + report.try_push_job_info(ctx.config, dict(status='waiting')) + targets = job_config['targets'] + try: + reimaged = reimage_machines(ctx, targets, job_config['machine_type']) + except Exception as e: + log.exception('Reimaging error. 
Nuking machines...') + # Reimage failures should map to the 'dead' status instead of 'fail' + report.try_push_job_info(ctx.config, dict(status='dead', failure_reason='Error reimaging machines: ' + str(e))) + nuke(ctx, True) + # Machine that fails to reimage after 10 times will be marked down + check_for_reimage_failures_and_mark_down(targets) + raise + ctx.config['targets'] = reimaged + # change the status to running after the reimaging process + report.try_push_job_info(ctx.config, dict(status='running')) + + +def unlock_targets(job_config): + serializer = report.ResultsSerializer(teuth_config.archive_base) + job_info = serializer.job_info(job_config['name'], job_config['job_id']) + machine_statuses = query.get_statuses(job_info['targets'].keys()) + # only unlock/nuke targets if locked and description matches + locked = [] + for status in machine_statuses: + name = shortname(status['name']) + description = status['description'] + if not status['locked']: + continue + if description != job_info['archive_path']: + log.warning( + "Was going to unlock %s but it was locked by another job: %s", + name, description + ) + continue + locked.append(name) + if not locked: + return + job_status = get_status(job_info) + if job_status == 'pass' or \ + (job_config.get('unlock_on_failure', False) and not job_config.get('nuke-on-error', False)): + log.info('Unlocking machines...') + fake_ctx = create_fake_context(job_config) + for machine in locked: + teuthology.lock.ops.unlock_one(fake_ctx, + machine, job_info['owner'], + job_info['archive_path']) + if job_status != 'pass' and job_config.get('nuke-on-error', False): + log.info('Nuking machines...') + fake_ctx = create_fake_context(job_config) + nuke(fake_ctx, True) + + +def run_with_watchdog(process, job_config): + job_start_time = datetime.utcnow() + + # Only push the information that's relevant to the watchdog, to save db + # load + job_info = dict( + name=job_config['name'], + job_id=job_config['job_id'], + ) + + # Sleep once outside of the loop to avoid double-posting jobs + time.sleep(teuth_config.watchdog_interval) + hit_max_timeout = False + while process.poll() is None: + # Kill jobs that have been running longer than the global max + run_time = datetime.utcnow() - job_start_time + total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds + if total_seconds > teuth_config.max_job_time: + hit_max_timeout = True + log.warning("Job ran longer than {max}s. Killing...".format( + max=teuth_config.max_job_time)) + try: + # kill processes but do not nuke yet so we can save + # the logs, coredumps, etc. + kill_job(job_info['name'], job_info['job_id'], + teuth_config.archive_base, job_config['owner'], + skip_nuke=True) + except Exception: + log.exception('Failed to kill job') + + try: + transfer_archives(job_info['name'], job_info['job_id'], + teuth_config.archive_base, job_config) + except Exception: + log.exception('Could not save logs') + + try: + # this time remove everything and unlock the machines + kill_job(job_info['name'], job_info['job_id'], + teuth_config.archive_base, job_config['owner']) + except Exception: + log.exception('Failed to kill job and unlock machines') + + # calling this without a status just updates the jobs updated time + report.try_push_job_info(job_info) + time.sleep(teuth_config.watchdog_interval) + + # we no longer support testing theses old branches + assert(job_config.get('teuthology_branch') not in ('argonaut', 'bobtail', + 'cuttlefish', 'dumpling')) + + # Let's make sure that paddles knows the job is finished. 
We don't know + # the status, but if it was a pass or fail it will have already been + # reported to paddles. In that case paddles ignores the 'dead' status. + # If the job was killed, paddles will use the 'dead' status. + extra_info = dict(status='dead') + if hit_max_timeout: + extra_info['failure_reason'] = 'hit max job timeout' + report.try_push_job_info(job_info, extra_info) + + +def create_fake_context(job_config, block=False): + owner = job_config.get('owner', get_user()) + os_version = job_config.get('os_version', None) + + ctx_args = { + 'config': job_config, + 'block': block, + 'owner': owner, + 'archive': job_config['archive_path'], + 'machine_type': job_config['machine_type'], + 'os_type': job_config.get('os_type', 'ubuntu'), + 'os_version': os_version, + 'name': job_config['name'], + } + + return FakeNamespace(ctx_args) + + +def transfer_archives(run_name, job_id, archive_base, job_config): + serializer = report.ResultsSerializer(archive_base) + job_info = serializer.job_info(run_name, job_id, simple=True) + + if 'archive' in job_info: + ctx = create_fake_context(job_config) + add_remotes(ctx, job_config) + + for log_type, log_path in job_info['archive'].items(): + if log_type == 'init': + log_type = '' + compress_logs(ctx, log_path) + archive_logs(ctx, log_path, log_type) + else: + log.info('No archives to transfer.') diff --git a/teuthology/dispatcher/test/test_reimage_error_mark_machine_down.py b/teuthology/dispatcher/test/test_reimage_error_mark_machine_down.py new file mode 100644 index 0000000000..f2365174e7 --- /dev/null +++ b/teuthology/dispatcher/test/test_reimage_error_mark_machine_down.py @@ -0,0 +1,104 @@ +from teuthology.dispatcher import supervisor +from unittest.mock import patch + +class TestCheckReImageFailureMarkDown(object): + def setup(self): + self.the_function = supervisor.check_for_reimage_failures_and_mark_down + + def create_n_out_of_10_reimage_failed_jobs(self, n): + ret_list = [] + for i in range(n): + obj1 = { + "failure_reason":"Error reimaging machines: Manually raised error" + } + ret_list.append(obj1) + for j in range(10-n): + obj2 = {"failure_reason":"Error something else: dummy"} + ret_list.append(obj2) + return ret_list + + @patch('teuthology.dispatcher.supervisor.shortname') + @patch('teuthology.lock.ops.update_lock') + @patch('teuthology.dispatcher.supervisor.requests') + @patch('teuthology.dispatcher.supervisor.urljoin') + @patch('teuthology.dispatcher.supervisor.teuth_config') + def test_one_machine_ten_reimage_failed_jobs( + self, + m_t_config, + m_urljoin, + m_requests, + mark_down, + shortname + ): + targets = {'fakeos@rmachine061.front.sepia.ceph.com': 'ssh-ed25519'} + m_requests.get.return_value.json.return_value = \ + self.create_n_out_of_10_reimage_failed_jobs(10) + shortname.return_value = 'rmachine061' + self.the_function(targets) + assert mark_down.called + + @patch('teuthology.dispatcher.supervisor.shortname') + @patch('teuthology.lock.ops.update_lock') + @patch('teuthology.dispatcher.supervisor.requests') + @patch('teuthology.dispatcher.supervisor.urljoin') + @patch('teuthology.dispatcher.supervisor.teuth_config') + def test_one_machine_seven_reimage_failed_jobs( + self, + m_t_config, + m_urljoin, + m_requests, + mark_down, + shortname, + ): + targets = {'fakeos@rmachine061.front.sepia.ceph.com': 'ssh-ed25519'} + m_requests.get.return_value.json.return_value = \ + self.create_n_out_of_10_reimage_failed_jobs(7) + shortname.return_value = 'rmachine061' + self.the_function(targets) + assert mark_down.called is False + + 
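These tests exercise the ten-strikes rule above; the counting relies on failure_is_reimage() from the supervisor module, roughly as in this sketch (failure reasons invented):

from teuthology.dispatcher.supervisor import failure_is_reimage

reasons = [
    "Error reimaging machines: Manually raised error",  # counts toward marking the node down
    "Error something else: dummy",                      # ignored
    None,                                               # ignored
]
assert [failure_is_reimage(r) for r in reasons] == [True, False, False]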
@patch('teuthology.dispatcher.supervisor.shortname') + @patch('teuthology.lock.ops.update_lock') + @patch('teuthology.dispatcher.supervisor.requests') + @patch('teuthology.dispatcher.supervisor.urljoin') + @patch('teuthology.dispatcher.supervisor.teuth_config') + def test_two_machine_all_reimage_failed_jobs( + self, + m_t_config, + m_urljoin, + m_requests, + mark_down, + shortname, + ): + targets = {'fakeos@rmachine061.front.sepia.ceph.com': 'ssh-ed25519', + 'fakeos@rmachine179.back.sepia.ceph.com': 'ssh-ed45333'} + m_requests.get.return_value.json.side_effect = \ + [self.create_n_out_of_10_reimage_failed_jobs(10), + self.create_n_out_of_10_reimage_failed_jobs(10)] + shortname.return_value.side_effect = ['rmachine061', 'rmachine179'] + self.the_function(targets) + assert mark_down.call_count == 2 + + @patch('teuthology.dispatcher.supervisor.shortname') + @patch('teuthology.lock.ops.update_lock') + @patch('teuthology.dispatcher.supervisor.requests') + @patch('teuthology.dispatcher.supervisor.urljoin') + @patch('teuthology.dispatcher.supervisor.teuth_config') + def test_two_machine_one_healthy_one_reimage_failure( + self, + m_t_config, + m_urljoin, + m_requests, + mark_down, + shortname, + ): + targets = {'fakeos@rmachine061.front.sepia.ceph.com': 'ssh-ed25519', + 'fakeos@rmachine179.back.sepia.ceph.com': 'ssh-ed45333'} + m_requests.get.return_value.json.side_effect = \ + [self.create_n_out_of_10_reimage_failed_jobs(0), + self.create_n_out_of_10_reimage_failed_jobs(10)] + shortname.return_value.side_effect = ['rmachine061', 'rmachine179'] + self.the_function(targets) + assert mark_down.call_count == 1 + assert mark_down.call_args_list[0][0][0].startswith('rmachine179') + diff --git a/teuthology/exceptions.py b/teuthology/exceptions.py new file mode 100644 index 0000000000..c02eed4f68 --- /dev/null +++ b/teuthology/exceptions.py @@ -0,0 +1,231 @@ +class BranchNotFoundError(ValueError): + def __init__(self, branch, repo=None): + self.branch = branch + self.repo = repo + + def __str__(self): + if self.repo: + repo_str = " in repo: %s" % self.repo + else: + repo_str = "" + return "Branch '{branch}' not found{repo_str}!".format( + branch=self.branch, repo_str=repo_str) + + +class BranchMismatchError(ValueError): + def __init__(self, branch, repo, reason=None): + self.branch = branch + self.repo = repo + self.reason = reason + + def __str__(self): + msg = f"Cannot use branch {self.branch} with repo {self.repo}" + if self.reason: + msg = f"{msg} because {self.reason}" + return msg + +class CommitNotFoundError(ValueError): + def __init__(self, commit, repo=None): + self.commit = commit + self.repo = repo + + def __str__(self): + if self.repo: + repo_str = " in repo: %s" % self.repo + else: + repo_str = "" + return "'{commit}' not found{repo_str}!".format( + commit=self.commit, repo_str=repo_str) + + +class GitError(RuntimeError): + pass + + +class BootstrapError(RuntimeError): + pass + + +class ConfigError(RuntimeError): + """ + Meant to be used when an invalid config entry is found. 
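A small check of the message formatting in the branch/commit errors above; the branch and repo values are made up:

from teuthology.exceptions import BranchNotFoundError, CommitNotFoundError

e = BranchNotFoundError('wip-foo', repo='https://github.com/ceph/ceph-ci.git')
assert str(e) == "Branch 'wip-foo' not found in repo: https://github.com/ceph/ceph-ci.git!"

c = CommitNotFoundError('abc123')
assert str(c) == "'abc123' not found!"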
+ """ + pass + + +class ParseError(Exception): + pass + + +class CommandFailedError(Exception): + + """ + Exception thrown on command failure + """ + def __init__(self, command, exitstatus, node=None, label=None): + self.command = command + self.exitstatus = exitstatus + self.node = node + self.label = label + + def __str__(self): + prefix = "Command failed" + if self.label: + prefix += " ({label})".format(label=self.label) + if self.node: + prefix += " on {node}".format(node=self.node) + return "{prefix} with status {status}: {cmd!r}".format( + status=self.exitstatus, + cmd=self.command, + prefix=prefix, + ) + + def fingerprint(self): + """ + Returns a list of strings to group failures with. + Used by sentry instead of grouping by backtrace. + """ + return [ + self.label or self.command, + 'exit status {}'.format(self.exitstatus), + '{{ type }}', + ] + + +class AnsibleFailedError(Exception): + + """ + Exception thrown when an ansible playbook fails + """ + def __init__(self, failures): + self.failures = failures + + def __str__(self): + return "{failures}".format( + failures=self.failures, + ) + + +class CommandCrashedError(Exception): + + """ + Exception thrown on crash + """ + def __init__(self, command): + self.command = command + + def __str__(self): + return "Command crashed: {command!r}".format( + command=self.command, + ) + + +class ConnectionLostError(Exception): + + """ + Exception thrown when the connection is lost + """ + def __init__(self, command, node=None): + self.command = command + self.node = node + + def __str__(self): + node_str = 'to %s ' % self.node if self.node else '' + return "SSH connection {node_str}was lost: {command!r}".format( + node_str=node_str, + command=self.command, + ) + + +class ScheduleFailError(RuntimeError): + def __init__(self, message, name=None): + self.message = message + self.name = name + + def __str__(self): + return "Scheduling {name} failed: {msg}".format( + name=self.name, + msg=self.message, + ).replace(' ', ' ') + + +class VersionNotFoundError(Exception): + def __init__(self, url): + self.url = url + + def __str__(self): + return "Failed to fetch package version from %s" % self.url + + +class UnsupportedPackageTypeError(Exception): + def __init__(self, node): + self.node = node + + def __str__(self): + return "os.package_type {pkg_type!r} on {node}".format( + node=self.node, pkg_type=self.node.os.package_type) + + +class SELinuxError(Exception): + def __init__(self, node, denials): + self.node = node + self.denials = denials + + def __str__(self): + return "SELinux denials found on {node}: {denials}".format( + node=self.node, denials=self.denials) + + +class QuotaExceededError(Exception): + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class SkipJob(Exception): + """ + Used by teuthology.worker when it notices that a job is broken and should + be skipped. 
+ """ + pass + + +class MaxWhileTries(Exception): + pass + + +class ConsoleError(Exception): + pass + + +class NoRemoteError(Exception): + message = "This operation requires a remote" + + def __str__(self): + return self.message + + +class UnitTestError(Exception): + """ + Exception thrown on unit test failure + """ + def __init__(self, command, exitstatus, node=None, label=None, message=""): + self.command = command + self.exitstatus = exitstatus + self.node = node + self.label = label + self.message = message + + def __str__(self): + prefix = "Unit test failed" + if self.label: + prefix += " ({label})".format(label=self.label) + if self.node: + prefix += " on {node}".format(node=self.node) + return "{prefix} with status {status}: {message}".format( + prefix=prefix, + status=self.exitstatus, + message=self.message, + ) diff --git a/teuthology/exit.py b/teuthology/exit.py new file mode 100644 index 0000000000..266e988eba --- /dev/null +++ b/teuthology/exit.py @@ -0,0 +1,78 @@ +import logging +import os +import signal + + +log = logging.getLogger(__name__) + + +class Exiter(object): + """ + A helper to manage any signal handlers we need to call upon receiving a + given signal + """ + def __init__(self): + self.handlers = list() + + def add_handler(self, signals, func): + """ + Adds a handler function to be called when any of the given signals are + received. + + The handler function should have a signature like:: + + my_handler(signal, frame) + """ + if isinstance(signals, int): + signals = [signals] + + for signal_ in signals: + signal.signal(signal_, self.default_handler) + + handler = Handler(self, func, signals) + log.debug( + "Installing handler: %s", + repr(handler), + ) + self.handlers.append(handler) + return handler + + def default_handler(self, signal_, frame): + log.debug( + "Got signal %s; running %s handler%s...", + signal_, + len(self.handlers), + '' if len(self.handlers) == 1 else 's', + ) + for handler in self.handlers: + handler.func(signal_, frame) + log.debug("Finished running handlers") + # Restore the default handler + signal.signal(signal_, 0) + # Re-send the signal to our main process + os.kill(os.getpid(), signal_) + + +class Handler(object): + def __init__(self, exiter, func, signals): + self.exiter = exiter + self.func = func + self.signals = signals + + def remove(self): + try: + log.debug("Removing handler: %s", self) + self.exiter.handlers.remove(self) + except ValueError: + pass + + def __repr__(self): + return "{c}(exiter={e}, func={f}, signals={s})".format( + c=self.__class__.__name__, + e=self.exiter, + f=self.func, + s=self.signals, + ) + + +exiter = Exiter() diff --git a/teuthology/job_status.py b/teuthology/job_status.py new file mode 100644 index 0000000000..05ff80d715 --- /dev/null +++ b/teuthology/job_status.py @@ -0,0 +1,38 @@ +def get_status(summary): + """ + :param summary: The job summary dict. Normally ctx.summary + :returns: A status string like 'pass', 'fail', or 'dead' + """ + status = summary.get('status') + if status is not None: + return status + + success = summary.get('success') + if success is True: + status = 'pass' + elif success is False: + status = 'fail' + else: + status = None + return status + + +def set_status(summary, status): + """ + Sets summary['status'] to status, and summary['success'] to True if status + is 'pass'. If status is not 'pass', then 'success' is False. + + If status is None, do nothing. + + :param summary: The job summary dict. Normally ctx.summary + :param status: The job status, e.g. 
'pass', 'fail', 'dead' + """ + if status is None: + return + + summary['status'] = status + if status == 'pass': + summary['success'] = True + else: + summary['success'] = False + diff --git a/teuthology/kill.py b/teuthology/kill.py new file mode 100755 index 0000000000..5af11b628c --- /dev/null +++ b/teuthology/kill.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +import os +import sys +import yaml +import psutil +import subprocess +import tempfile +import logging +import getpass + + +from teuthology import beanstalk +from teuthology import report +from teuthology.config import config +from teuthology import misc + +log = logging.getLogger(__name__) + + +def main(args): + run_name = args['--run'] + job = args['--job'] + jobspec = args['--jobspec'] + archive_base = args['--archive'] + owner = args['--owner'] + machine_type = args['--machine-type'] + preserve_queue = args['--preserve-queue'] + + if jobspec: + split_spec = jobspec.split('/') + run_name = split_spec[0] + job = [split_spec[1]] + + if job: + for job_id in job: + kill_job(run_name, job_id, archive_base, owner) + else: + kill_run(run_name, archive_base, owner, machine_type, + preserve_queue=preserve_queue) + + +def kill_run(run_name, archive_base=None, owner=None, machine_type=None, + preserve_queue=False): + run_info = {} + serializer = report.ResultsSerializer(archive_base) + if archive_base: + run_archive_dir = os.path.join(archive_base, run_name) + if os.path.isdir(run_archive_dir): + run_info = find_run_info(serializer, run_name) + if 'machine_type' in run_info: + machine_type = run_info['machine_type'] + owner = run_info['owner'] + else: + log.warning("The run info does not have machine type: %s" % run_info) + log.warning("Run archive used: %s" % run_archive_dir) + log.info("Using machine type '%s' and owner '%s'" % (machine_type, owner)) + elif machine_type is None: + # no jobs found in archive and no machine type specified, + # so we try paddles to see if there is anything scheduled + run_info = report.ResultsReporter().get_run(run_name) + machine_type = run_info.get('machine_type', None) + if machine_type: + log.info(f"Using machine type '{machine_type}' received from paddles.") + else: + raise RuntimeError(f"Cannot find machine type for the run {run_name}; " + + "you must also pass --machine-type") + + if not preserve_queue: + remove_beanstalk_jobs(run_name, machine_type) + remove_paddles_jobs(run_name) + kill_processes(run_name, run_info.get('pids')) + if owner is not None: + targets = find_targets(run_name, owner) + nuke_targets(targets, owner) + + +def kill_job(run_name, job_id, archive_base=None, owner=None, skip_nuke=False): + serializer = report.ResultsSerializer(archive_base) + job_info = serializer.job_info(run_name, job_id) + if not owner: + if 'owner' not in job_info: + raise RuntimeError( + "I could not figure out the owner of the requested job. 
" + "Please pass --owner .") + owner = job_info['owner'] + kill_processes(run_name, [job_info.get('pid')]) + # Because targets can be missing for some cases, for example, when all + # the necessary nodes ain't locked yet, we do not use job_info to get them, + # but use find_targets(): + targets = find_targets(run_name, owner, job_id) + if not skip_nuke: + nuke_targets(targets, owner) + + +def find_run_info(serializer, run_name): + log.info("Assembling run information...") + run_info_fields = [ + 'machine_type', + 'owner', + ] + + pids = [] + run_info = {} + job_info = {} + job_num = 0 + jobs = serializer.jobs_for_run(run_name) + job_total = len(jobs) + for (job_id, job_dir) in jobs.items(): + if not os.path.isdir(job_dir): + continue + job_num += 1 + beanstalk.print_progress(job_num, job_total, 'Reading Job: ') + job_info = serializer.job_info(run_name, job_id, simple=True) + for key in job_info.keys(): + if key in run_info_fields and key not in run_info: + run_info[key] = job_info[key] + if 'pid' in job_info: + pids.append(job_info['pid']) + run_info['pids'] = pids + return run_info + + +def remove_paddles_jobs(run_name): + jobs = report.ResultsReporter().get_jobs(run_name, fields=['status']) + job_ids = [job['job_id'] for job in jobs if job['status'] == 'queued'] + if job_ids: + log.info("Deleting jobs from paddles: %s", str(job_ids)) + report.try_delete_jobs(run_name, job_ids) + + +def remove_beanstalk_jobs(run_name, tube_name): + qhost = config.queue_host + qport = config.queue_port + if qhost is None or qport is None: + raise RuntimeError( + 'Beanstalk queue information not found in {conf_path}'.format( + conf_path=config.yaml_path)) + log.info("Checking Beanstalk Queue...") + beanstalk_conn = beanstalk.connect() + real_tube_name = beanstalk.watch_tube(beanstalk_conn, tube_name) + + curjobs = beanstalk_conn.stats_tube(real_tube_name)['current-jobs-ready'] + if curjobs != 0: + x = 1 + while x != curjobs: + x += 1 + job = beanstalk_conn.reserve(timeout=20) + if job is None: + continue + job_config = yaml.safe_load(job.body) + if run_name == job_config['name']: + job_id = job.stats()['id'] + msg = "Deleting job from queue. 
ID: " + \ + "{id} Name: {name} Desc: {desc}".format( + id=str(job_id), + name=job_config['name'], + desc=job_config['description'], + ) + log.info(msg) + job.delete() + else: + print("No jobs in Beanstalk Queue") + beanstalk_conn.close() + + +def kill_processes(run_name, pids=None): + if pids: + to_kill = set(pids).intersection(psutil.pids()) + else: + to_kill = find_pids(run_name) + + # Remove processes that don't match run-name from the set + to_check = set(to_kill) + for pid in to_check: + if not process_matches_run(pid, run_name): + to_kill.remove(pid) + + if len(to_kill) == 0: + log.info("No teuthology processes running") + else: + log.info("Killing Pids: " + str(to_kill)) + may_need_sudo = \ + psutil.Process(int(pid)).username() != getpass.getuser() + if may_need_sudo: + sudo_works = subprocess.Popen(['sudo', '-n', 'true']).wait() == 0 + if not sudo_works: + log.debug("Passwordless sudo not configured; not using sudo") + use_sudo = may_need_sudo and sudo_works + for pid in to_kill: + args = ['kill', str(pid)] + # Don't attempt to use sudo if it's not necessary + if use_sudo: + args = ['sudo', '-n'] + args + subprocess.call(args) + + +def process_matches_run(pid, run_name): + try: + p = psutil.Process(pid) + cmd = p.cmdline() + if run_name in cmd and sys.argv[0] not in cmd: + return True + except psutil.NoSuchProcess: + pass + return False + + +def find_pids(run_name): + run_pids = [] + for pid in psutil.pids(): + if process_matches_run(pid, run_name): + run_pids.append(pid) + return run_pids + + +def find_targets(run_name, owner, job_id=None): + lock_args = [ + 'teuthology-lock', + '--list-targets', + '--desc-pattern', + '/' + run_name + '/' + str(job_id or ''), + '--status', + 'up', + '--owner', + owner + ] + proc = subprocess.Popen(lock_args, stdout=subprocess.PIPE) + stdout, stderr = proc.communicate() + out_obj = yaml.safe_load(stdout) + if not out_obj or 'targets' not in out_obj: + return {} + + return out_obj + + +def nuke_targets(targets_dict, owner): + targets = targets_dict.get('targets') + if not targets: + log.info("No locked machines. 
Not nuking anything") + return + + to_nuke = [] + for target in targets: + to_nuke.append(misc.decanonicalize_hostname(target)) + + target_file = tempfile.NamedTemporaryFile(delete=False, mode='w+t') + target_file.write(yaml.safe_dump(targets_dict)) + target_file.close() + + log.info("Nuking machines: " + str(to_nuke)) + nuke_args = [ + 'teuthology-nuke', + '-t', + target_file.name, + '--owner', + owner + ] + nuke_args.extend(['--reboot-all', '--unlock']) + + proc = subprocess.Popen( + nuke_args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + for line in proc.stdout: + line = line.replace(b'\r', b'').replace(b'\n', b'') + log.info(line.decode()) + sys.stdout.flush() + + os.unlink(target_file.name) diff --git a/teuthology/lock/__init__.py b/teuthology/lock/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/lock/cli.py b/teuthology/lock/cli.py new file mode 100644 index 0000000000..9cc8210a22 --- /dev/null +++ b/teuthology/lock/cli.py @@ -0,0 +1,300 @@ +import argparse +import collections +import json +import logging +import re + +import yaml + +import teuthology +import teuthology.parallel +import teuthology.provision +from teuthology import misc +from teuthology.config import set_config_attr + +from teuthology.lock import ( + ops, + util, + query, +) + + +log = logging.getLogger(__name__) + + +def main(ctx): + if ctx.verbose: + teuthology.log.setLevel(logging.DEBUG) + + set_config_attr(ctx) + + ret = 0 + user = ctx.owner + machines = [misc.canonicalize_hostname(m, user=False) + for m in ctx.machines] + machines_to_update = [] + + if ctx.targets: + try: + with open(ctx.targets) as f: + g = yaml.safe_load_all(f) + for new in g: + if 'targets' in new: + for t in new['targets'].keys(): + machines.append(t) + except IOError as e: + raise argparse.ArgumentTypeError(str(e)) + + if ctx.f: + assert ctx.lock or ctx.unlock, \ + '-f is only supported by --lock and --unlock' + if machines: + assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \ + or ctx.update or ctx.brief, \ + 'machines cannot be specified with that operation' + else: + if ctx.lock: + log.error("--lock requires specific machines passed as arguments") + else: + # This condition might never be hit, but it's not clear. + assert ctx.num_to_lock or ctx.list or ctx.list_targets or \ + ctx.summary or ctx.brief, \ + 'machines must be specified for that operation' + if ctx.all: + assert ctx.list or ctx.list_targets or ctx.brief, \ + '--all can only be used with --list, --list-targets, and --brief' + assert ctx.owner is None, \ + '--all and --owner are mutually exclusive' + assert not machines, \ + '--all and listing specific machines are incompatible' + if ctx.num_to_lock: + assert ctx.machine_type, \ + 'must specify machine type to lock' + + if ctx.brief or ctx.list or ctx.list_targets: + assert ctx.desc is None, '--desc does nothing with --list/--brief' + + # we may need to update host keys for vms. Don't do it for + # every vm; however, update any vms included in the list given + # to the CLI (machines), or any owned by the specified owner or + # invoking user if no machines are specified. 
+ vmachines = [] + statuses = query.get_statuses(machines) + owner = ctx.owner or misc.get_user() + for machine in statuses: + if query.is_vm(status=machine) and machine['locked'] and \ + (machines or machine['locked_by'] == owner): + vmachines.append(machine['name']) + if vmachines: + log.info("updating host keys for %s", ' '.join(sorted(vmachines))) + ops.do_update_keys(vmachines, _raise=False) + # get statuses again to refresh any updated keys + statuses = query.get_statuses(machines) + if statuses: + statuses = util.winnow(statuses, ctx.machine_type, 'machine_type') + if not machines and ctx.owner is None and not ctx.all: + ctx.owner = misc.get_user() + statuses = util.winnow(statuses, ctx.owner, 'locked_by') + statuses = util.winnow(statuses, ctx.status, 'up', + lambda s: s['up'] == (ctx.status == 'up')) + statuses = util.winnow(statuses, ctx.locked, 'locked', + lambda s: s['locked'] == (ctx.locked == 'true')) + statuses = util.winnow(statuses, ctx.desc, 'description') + statuses = util.winnow(statuses, ctx.desc_pattern, 'description', + lambda s: s['description'] and \ + ctx.desc_pattern in s['description']) + if ctx.json_query: + statuses = util.json_matching_statuses(ctx.json_query, statuses) + statuses = util.winnow(statuses, ctx.os_type, 'os_type') + statuses = util.winnow(statuses, ctx.os_version, 'os_version') + + # When listing, only show the vm_host's name, not every detail + for s in statuses: + if not query.is_vm(status=s): + continue + # with an OpenStack API, there is no host for a VM + if s['vm_host'] is None: + continue + vm_host_name = s.get('vm_host', dict())['name'] + if vm_host_name: + s['vm_host'] = vm_host_name + if ctx.list: + print(json.dumps(statuses, indent=4)) + + elif ctx.brief: + maxname = max((len(_['name'] or '') + for _ in statuses), default=0) + maxuser = max((len(_['locked_by'] or 'None') + for _ in statuses), default=0) + node_status_template = ( + '{{host:<{name}}} {{up:<4}} {{locked:<8}} ' + '{{owner:<{user}}} "{{desc}}"' + ).format(name=maxname, user=maxuser) + for s in sorted(statuses, key=lambda s: s.get('name')): + locked = 'unlocked' if s['locked'] == 0 else 'locked' + up = 'up' if s['up'] else 'down' + mo = re.match('\w+@(\w+?)\..*', s['name']) + host = mo.group(1) if mo else s['name'] + print(node_status_template.format( + up=up, locked=locked, host=host, + owner=s['locked_by'] or 'None', desc=s['description'])) + + else: + frag = {'targets': {}} + for f in statuses: + frag['targets'][f['name']] = f['ssh_pub_key'] + print(yaml.safe_dump(frag, default_flow_style=False)) + else: + log.error('error retrieving lock statuses') + ret = 1 + + elif ctx.summary: + do_summary(ctx) + return 0 + + elif ctx.lock: + if not util.vps_version_or_type_valid( + ctx.machine_type, ctx.os_type, ctx.os_version): + log.error('Invalid os-type or version detected -- lock failed') + return 1 + reimage_types = teuthology.provision.get_reimage_types() + reimage_machines = list() + updatekeys_machines = list() + machine_types = dict() + for machine in machines: + resp = ops.lock_one(machine, user, ctx.desc) + if resp.ok: + machine_status = resp.json() + machine_type = machine_status['machine_type'] + machine_types[machine] = machine_type + if not resp.ok: + ret = 1 + if not ctx.f: + return ret + elif not query.is_vm(machine, machine_status): + if machine_type in reimage_types: + # Reimage in parallel just below here + reimage_machines.append(machine) + # Update keys last + updatekeys_machines = list() + else: + machines_to_update.append(machine) + 
ops.update_nodes([machine], True) + teuthology.provision.create_if_vm( + ctx, + misc.canonicalize_hostname(machine), + ) + with teuthology.parallel.parallel() as p: + ops.update_nodes(reimage_machines, True) + for machine in reimage_machines: + p.spawn(teuthology.provision.reimage, ctx, machine, machine_types[machine]) + for machine in updatekeys_machines: + ops.do_update_keys([machine]) + ops.update_nodes(reimage_machines + machines_to_update) + + elif ctx.unlock: + if ctx.owner is None and user is None: + user = misc.get_user() + # If none of them are vpm, do them all in one shot + if not filter(query.is_vm, machines): + res = ops.unlock_many(machines, user) + return 0 if res else 1 + for machine in machines: + if not ops.unlock_one(ctx, machine, user): + ret = 1 + if not ctx.f: + return ret + else: + machines_to_update.append(machine) + elif ctx.num_to_lock: + result = ops.lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user, + ctx.desc, ctx.os_type, ctx.os_version, ctx.arch) + if not result: + ret = 1 + else: + machines_to_update = result.keys() + if ctx.machine_type == 'vps': + shortnames = ' '.join( + [misc.decanonicalize_hostname(name) for name in + result.keys()] + ) + if len(result) < ctx.num_to_lock: + log.error("Locking failed.") + for machine in result: + ops.unlock_one(ctx, machine, user) + ret = 1 + else: + log.info("Successfully Locked:\n%s\n" % shortnames) + log.info( + "Unable to display keys at this time (virtual " + + "machines are booting).") + log.info( + "Please run teuthology-lock --list-targets %s once " + + "these machines come up.", + shortnames) + else: + print(yaml.safe_dump( + dict(targets=result), + default_flow_style=False)) + elif ctx.update: + assert ctx.desc is not None or ctx.status is not None, \ + 'you must specify description or status to update' + assert ctx.owner is None, 'only description and status may be updated' + machines_to_update = machines + + if ctx.desc is not None or ctx.status is not None: + for machine in machines_to_update: + ops.update_lock(machine, ctx.desc, ctx.status) + + return ret + + +def do_summary(ctx): + lockd = collections.defaultdict(lambda: [0, 0, 'unknown']) + if ctx.machine_type: + locks = query.list_locks(machine_type=ctx.machine_type) + else: + locks = query.list_locks() + for l in locks: + who = l['locked_by'] if l['locked'] == 1 \ + else '(free)', l['machine_type'] + lockd[who][0] += 1 + lockd[who][1] += 1 if l['up'] else 0 + lockd[who][2] = l['machine_type'] + + # sort locks by machine type and count + locks = sorted([p for p in lockd.items() + ], key=lambda sort: (sort[1][2] or '', sort[1][0])) + total_count, total_up = 0, 0 + print("TYPE COUNT UP OWNER") + + for (owner, (count, upcount, machinetype)) in locks: + # if machinetype == spectype: + print("{machinetype:8s} {count:3d} {up:3d} {owner}".format( + count=count, up=upcount, owner=owner[0], + machinetype=machinetype or '(none)')) + total_count += count + total_up += upcount + + print(" --- ---") + print("{cnt:12d} {up:3d}".format(cnt=total_count, up=total_up)) + + +def updatekeys(args): + loglevel = logging.DEBUG if args['--verbose'] else logging.INFO + logging.basicConfig( + level=loglevel, + ) + all_ = args['--all'] + machines = [] + if args['']: + machines = [misc.canonicalize_hostname(m, user=None) + for m in args['']] + elif args['--targets']: + targets = args['--targets'] + with open(targets) as f: + docs = yaml.safe_load_all(f) + for doc in docs: + machines = [n for n in doc.get('targets', dict()).keys()] + + return ops.do_update_keys(machines, 
all_)[0] diff --git a/teuthology/lock/ops.py b/teuthology/lock/ops.py new file mode 100644 index 0000000000..b0c7d8033f --- /dev/null +++ b/teuthology/lock/ops.py @@ -0,0 +1,449 @@ +import logging +import json +import os +import random +import time +import yaml + +import requests + +import teuthology.orchestra.remote +import teuthology.parallel +import teuthology.provision +from teuthology import misc +from teuthology import report +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.task import console_log +from teuthology.misc import canonicalize_hostname +from teuthology.job_status import set_status + +from teuthology.lock import util, query + +log = logging.getLogger(__name__) + + +def update_nodes(nodes, reset_os=False): + for node in nodes: + remote = teuthology.orchestra.remote.Remote( + canonicalize_hostname(node)) + if reset_os: + log.info("Updating [%s]: reset os type and version on server", node) + inventory_info = dict() + inventory_info['os_type'] = '' + inventory_info['os_version'] = '' + inventory_info['name'] = remote.hostname + else: + log.info("Updating [%s]: set os type and version on server", node) + inventory_info = remote.inventory_info + update_inventory(inventory_info) + + +def lock_many_openstack(ctx, num, machine_type, user=None, description=None, + arch=None): + os_type = teuthology.provision.get_distro(ctx) + os_version = teuthology.provision.get_distro_version(ctx) + if hasattr(ctx, 'config'): + resources_hint = ctx.config.get('openstack') + else: + resources_hint = None + machines = teuthology.provision.openstack.ProvisionOpenStack().create( + num, os_type, os_version, arch, resources_hint) + result = {} + for machine in machines: + lock_one(machine, user, description) + result[machine] = None # we do not collect ssh host keys yet + return result + + +def lock_many(ctx, num, machine_type, user=None, description=None, + os_type=None, os_version=None, arch=None, reimage=True): + if user is None: + user = misc.get_user() + + if not util.vps_version_or_type_valid( + ctx.machine_type, + os_type, + os_version + ): + log.error('Invalid os-type or version detected -- lock failed') + return + + # In the for loop below we can safely query for all bare-metal machine_type + # values at once. So, if we're being asked for 'plana,mira,burnupi', do it + # all in one shot. If we are passed 'plana,mira,burnupi,vps', do one query + # for 'plana,mira,burnupi' and one for 'vps' + machine_types_list = misc.get_multi_machine_types(machine_type) + if machine_types_list == ['vps']: + machine_types = machine_types_list + elif machine_types_list == ['openstack']: + return lock_many_openstack(ctx, num, machine_type, + user=user, + description=description, + arch=arch) + elif 'vps' in machine_types_list: + machine_types_non_vps = list(machine_types_list) + machine_types_non_vps.remove('vps') + machine_types_non_vps = '|'.join(machine_types_non_vps) + machine_types = [machine_types_non_vps, 'vps'] + else: + machine_types_str = '|'.join(machine_types_list) + machine_types = [machine_types_str, ] + + for machine_type in machine_types: + uri = os.path.join(config.lock_server, 'nodes', 'lock_many', '') + data = dict( + locked_by=user, + count=num, + machine_type=machine_type, + description=description, + ) + # Only query for os_type/os_version if non-vps and non-libcloud, since + # in that case we just create them. 
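        # Illustration only (values are invented): for a bare-metal machine
        # type, the JSON body posted to <lock_server>/nodes/lock_many/ below
        # ends up looking roughly like
        #   {"locked_by": "user@host", "count": 2, "machine_type": "mira",
        #    "description": "/archive/run-name", "os_type": "centos",
        #    "os_version": "8", "arch": "x86_64"}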
+ vm_types = ['vps'] + teuthology.provision.cloud.get_types() + reimage_types = teuthology.provision.get_reimage_types() + if machine_type not in vm_types + reimage_types: + if os_type: + data['os_type'] = os_type + if os_version: + data['os_version'] = os_version + if arch: + data['arch'] = arch + log.debug("lock_many request: %s", repr(data)) + response = requests.post( + uri, + data=json.dumps(data), + headers={'content-type': 'application/json'}, + ) + if response.ok: + machines = dict() + for machine in response.json(): + key = misc.canonicalize_hostname( + machine['name'], + user=machine.get('user'), + ) + machines[key] = machine['ssh_pub_key'] + log.debug('locked {machines}'.format( + machines=', '.join(machines.keys()))) + if machine_type in vm_types: + ok_machs = {} + update_nodes(machines, True) + for machine in machines: + if teuthology.provision.create_if_vm(ctx, machine): + ok_machs[machine] = machines[machine] + else: + log.error('Unable to create virtual machine: %s', + machine) + unlock_one(ctx, machine, user) + ok_machs = do_update_keys(list(ok_machs.keys()))[1] + update_nodes(ok_machs) + return ok_machs + elif reimage and machine_type in reimage_types: + return reimage_machines(ctx, machines, machine_type) + return machines + elif response.status_code == 503: + log.error('Insufficient nodes available to lock %d %s nodes.', + num, machine_type) + log.error(response.text) + else: + log.error('Could not lock %d %s nodes, reason: unknown.', + num, machine_type) + return [] + + +def lock_one(name, user=None, description=None): + name = misc.canonicalize_hostname(name, user=None) + if user is None: + user = misc.get_user() + request = dict(name=name, locked=True, locked_by=user, + description=description) + uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '') + response = requests.put(uri, json.dumps(request)) + success = response.ok + if success: + log.debug('locked %s as %s', name, user) + else: + try: + reason = response.json().get('message') + except ValueError: + reason = str(response.status_code) + log.error('failed to lock {node}. 
reason: {reason}'.format( + node=name, reason=reason)) + return response + + +def unlock_many(names, user): + fixed_names = [misc.canonicalize_hostname(name, user=None) for name in + names] + names = fixed_names + uri = os.path.join(config.lock_server, 'nodes', 'unlock_many', '') + data = dict( + locked_by=user, + names=names, + ) + with safe_while( + sleep=1, increment=0.5, action=f'unlock_many {names}') as proceed: + while proceed(): + response = requests.post( + uri, + data=json.dumps(data), + headers={'content-type': 'application/json'}, + ) + if response.ok: + log.debug("Unlocked: %s", ', '.join(names)) + return True + log.error("Failed to unlock: %s", ', '.join(names)) + return False + + +def unlock_one(ctx, name, user, description=None): + name = misc.canonicalize_hostname(name, user=None) + if not teuthology.provision.destroy_if_vm(ctx, name, user, description): + log.error('destroy failed for %s', name) + return False + request = dict(name=name, locked=False, locked_by=user, + description=description) + uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '') + with safe_while( + sleep=1, increment=0.5, action="unlock %s" % name) as proceed: + while proceed(): + try: + response = requests.put(uri, json.dumps(request)) + if response.ok: + log.info('unlocked: %s', name) + return response.ok + # Work around https://github.com/kennethreitz/requests/issues/2364 + except requests.ConnectionError as e: + log.warning("Saw %s while unlocking; retrying...", str(e)) + try: + reason = response.json().get('message') + except ValueError: + reason = str(response.status_code) + log.error('failed to unlock {node}. reason: {reason}'.format( + node=name, reason=reason)) + return False + + +def update_lock(name, description=None, status=None, ssh_pub_key=None): + name = misc.canonicalize_hostname(name, user=None) + updated = {} + if description is not None: + updated['description'] = description + if status is not None: + updated['up'] = (status == 'up') + if ssh_pub_key is not None: + updated['ssh_pub_key'] = ssh_pub_key + + if updated: + uri = os.path.join(config.lock_server, 'nodes', name, '') + inc = random.uniform(0, 1) + with safe_while( + sleep=1, increment=inc, action=f'update lock {name}') as proceed: + while proceed(): + response = requests.put( + uri, + json.dumps(updated)) + if response.ok: + return True + return response.ok + return True + + +def update_inventory(node_dict): + """ + Like update_lock(), but takes a dict and doesn't try to do anything smart + by itself + """ + name = node_dict.get('name') + if not name: + raise ValueError("must specify name") + if not config.lock_server: + return + uri = os.path.join(config.lock_server, 'nodes', name, '') + log.info("Updating %s on lock server", name) + inc = random.uniform(0, 1) + with safe_while( + sleep=1, increment=inc, action=f'update inventory {name}') as proceed: + while proceed(): + response = requests.put( + uri, + json.dumps(node_dict), + headers={'content-type': 'application/json'}, + ) + if response.status_code == 404: + log.info("Creating new node %s on lock server", name) + uri = os.path.join(config.lock_server, 'nodes', '') + response = requests.post( + uri, + json.dumps(node_dict), + headers={'content-type': 'application/json'}, + ) + if response.ok: + return + +def do_update_keys(machines, all_=False, _raise=True): + reference = query.list_locks(keyed_by_name=True) + if all_: + machines = reference.keys() + keys_dict = misc.ssh_keyscan(machines, _raise=_raise) + return push_new_keys(keys_dict, reference), 
keys_dict + + +def push_new_keys(keys_dict, reference): + ret = 0 + for hostname, pubkey in keys_dict.items(): + log.info('Checking %s', hostname) + if reference[hostname]['ssh_pub_key'] != pubkey: + log.info('New key found. Updating...') + if not update_lock(hostname, ssh_pub_key=pubkey): + log.error('failed to update %s!', hostname) + ret = 1 + return ret + + +def reimage_machines(ctx, machines, machine_type): + reimage_types = teuthology.provision.get_reimage_types() + if machine_type not in reimage_types: + log.info(f"Skipping reimage of {machines.keys()} because {machine_type} is not in {reimage_types}") + return machines + # Setup log file, reimage machines and update their keys + reimaged = dict() + console_log_conf = dict( + logfile_name='{shortname}_reimage.log', + remotes=[teuthology.orchestra.remote.Remote(machine) + for machine in machines], + ) + with console_log.task(ctx, console_log_conf): + with teuthology.parallel.parallel() as p: + for machine in machines: + log.info("Start node '%s' reimaging", machine) + update_nodes([machine], True) + p.spawn(teuthology.provision.reimage, ctx, + machine, machine_type) + reimaged[machine] = machines[machine] + log.info("Node '%s' reimaging is complete", machine) + reimaged = do_update_keys(list(reimaged.keys()))[1] + update_nodes(reimaged) + return reimaged + + +def block_and_lock_machines(ctx, total_requested, machine_type, reimage=True): + # It's OK for os_type and os_version to be None here. If we're trying + # to lock a bare metal machine, we'll take whatever is available. If + # we want a vps, defaults will be provided by misc.get_distro and + # misc.get_distro_version in provision.create_if_vm + os_type = ctx.config.get("os_type") + os_version = ctx.config.get("os_version") + arch = ctx.config.get('arch') + reserved = config.reserve_machines + assert isinstance(reserved, int), 'reserve_machines must be integer' + assert (reserved >= 0), 'reserve_machines should >= 0' + + log.info('Locking machines...') + # change the status during the locking process + report.try_push_job_info(ctx.config, dict(status='waiting')) + + all_locked = dict() + requested = total_requested + while True: + # get a candidate list of machines + machines = query.list_locks(machine_type=machine_type, up=True, + locked=False, count=requested + reserved) + if machines is None: + if ctx.block: + log.error('Error listing machines, trying again') + time.sleep(20) + continue + else: + raise RuntimeError('Error listing machines') + + # make sure there are machines for non-automated jobs to run + if len(machines) < reserved + requested \ + and ctx.owner.startswith('scheduled'): + if ctx.block: + log.info( + 'waiting for more %s machines to be free (need %s + %s, have %s)...', + machine_type, + reserved, + requested, + len(machines), + ) + time.sleep(10) + continue + else: + assert 0, ('not enough machines free; need %s + %s, have %s' % + (reserved, requested, len(machines))) + + try: + newly_locked = lock_many(ctx, requested, machine_type, + ctx.owner, ctx.archive, os_type, + os_version, arch, reimage=reimage) + except Exception: + # Lock failures should map to the 'dead' status instead of 'fail' + if 'summary' in ctx: + set_status(ctx.summary, 'dead') + raise + all_locked.update(newly_locked) + log.info( + '{newly_locked} {mtype} machines locked this try, ' + '{total_locked}/{total_requested} locked so far'.format( + newly_locked=len(newly_locked), + mtype=machine_type, + total_locked=len(all_locked), + total_requested=total_requested, + ) + ) + if len(all_locked) == 
total_requested: + vmlist = [] + for lmach in all_locked: + if teuthology.lock.query.is_vm(lmach): + vmlist.append(lmach) + if vmlist: + log.info('Waiting for virtual machines to come up') + keys_dict = dict() + loopcount = 0 + while len(keys_dict) != len(vmlist): + loopcount += 1 + time.sleep(10) + keys_dict = misc.ssh_keyscan(vmlist) + log.info('virtual machine is still unavailable') + if loopcount == 40: + loopcount = 0 + log.info('virtual machine(s) still not up, ' + + 'recreating unresponsive ones.') + for guest in vmlist: + if guest not in keys_dict.keys(): + log.info('recreating: ' + guest) + full_name = misc.canonicalize_hostname(guest) + teuthology.provision.destroy_if_vm(ctx, full_name) + teuthology.provision.create_if_vm(ctx, full_name) + if teuthology.lock.ops.do_update_keys(keys_dict)[0]: + log.info("Error in virtual machine keys") + newscandict = {} + for dkey in all_locked.keys(): + stats = teuthology.lock.query.get_status(dkey) + newscandict[dkey] = stats['ssh_pub_key'] + ctx.config['targets'] = newscandict + else: + ctx.config['targets'] = all_locked + locked_targets = yaml.safe_dump( + ctx.config['targets'], + default_flow_style=False + ).splitlines() + log.info('\n '.join(['Locked targets:', ] + locked_targets)) + # successfully locked machines, change status back to running + report.try_push_job_info(ctx.config, dict(status='running')) + break + elif not ctx.block: + assert 0, 'not enough machines are available' + else: + requested = requested - len(newly_locked) + assert requested > 0, "lock_machines: requested counter went" \ + "negative, this shouldn't happen" + + log.info( + "{total} machines locked ({new} new); need {more} more".format( + total=len(all_locked), new=len(newly_locked), more=requested) + ) + log.warning('Could not lock enough machines, waiting...') + time.sleep(10) diff --git a/teuthology/lock/query.py b/teuthology/lock/query.py new file mode 100644 index 0000000000..bb1044c2b3 --- /dev/null +++ b/teuthology/lock/query.py @@ -0,0 +1,153 @@ +import logging +import os + +import requests + +from teuthology import misc +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.util.compat import urlencode + + +log = logging.getLogger(__name__) + + +def get_status(name): + name = misc.canonicalize_hostname(name, user=None) + uri = os.path.join(config.lock_server, 'nodes', name, '') + with safe_while( + sleep=1, increment=0.5, action=f'get_status {name}') as proceed: + while proceed(): + response = requests.get(uri) + if response.ok: + return response.json() + log.warning( + "Failed to query lock server for status of {name}".format(name=name)) + return dict() + + +def get_statuses(machines): + if machines: + statuses = [] + for machine in machines: + machine = misc.canonicalize_hostname(machine) + status = get_status(machine) + if status: + statuses.append(status) + else: + log.error("Lockserver doesn't know about machine: %s" % + machine) + else: + statuses = list_locks() + return statuses + + +def is_vm(name=None, status=None): + if status is None: + if name is None: + raise ValueError("Must provide either name or status, or both") + name = misc.canonicalize_hostname(name) + status = get_status(name) + return status.get('is_vm', False) + + +def list_locks(keyed_by_name=False, **kwargs): + uri = os.path.join(config.lock_server, 'nodes', '') + for key, value in kwargs.items(): + if kwargs[key] is False: + kwargs[key] = '0' + if kwargs[key] is True: + kwargs[key] = '1' + if kwargs: + if 'machine_type' in kwargs: + 
kwargs['machine_type'] = kwargs['machine_type'].replace(',','|') + uri += '?' + urlencode(kwargs) + with safe_while( + sleep=1, increment=0.5, action='list_locks') as proceed: + while proceed(): + try: + response = requests.get(uri) + if response.ok: + break + except requests.ConnectionError: + log.exception("Could not contact lock server: %s, retrying...", config.lock_server) + if response.ok: + if not keyed_by_name: + return response.json() + else: + return {node['name']: node + for node in response.json()} + return dict() + + +def find_stale_locks(owner=None): + """ + Return a list of node dicts corresponding to nodes that were locked to run + a job, but the job is no longer running. The purpose of this is to enable + us to nuke nodes that were left locked due to e.g. infrastructure failures + and return them to the pool. + + :param owner: If non-None, return nodes locked by owner. Default is None. + """ + def might_be_stale(node_dict): + """ + Answer the question: "might this be a stale lock?" + + The answer is yes if: + It is locked + It has a non-null description containing multiple '/' characters + + ... because we really want "nodes that were locked for a particular job + and are still locked" and the above is currently the best way to guess. + """ + desc = node_dict['description'] + if (node_dict['locked'] is True and + desc is not None and desc.startswith('/') and + desc.count('/') > 1): + return True + return False + + # Which nodes are locked for jobs? + nodes = list_locks(locked=True) + if owner is not None: + nodes = [node for node in nodes if node['locked_by'] == owner] + nodes = filter(might_be_stale, nodes) + + def node_job_is_active(node, cache): + """ + Is this node's job active (e.g. running or waiting)? + + :param node: The node dict as returned from the lock server + :param cache: A set() used for caching results + :returns: True or False + """ + description = node['description'] + if description in cache: + return True + (name, job_id) = description.split('/')[-2:] + url = os.path.join(config.results_server, 'runs', name, 'jobs', job_id, + '') + with safe_while( + sleep=1, increment=0.5, action='node_is_active') as proceed: + while proceed(): + resp = requests.get(url) + if resp.ok: + break + if not resp.ok: + return False + job_info = resp.json() + if job_info['status'] in ('running', 'waiting'): + cache.add(description) + return True + return False + + result = list() + # Here we build the list of of nodes that are locked, for a job (as opposed + # to being locked manually for random monkeying), where the job is not + # running + active_jobs = set() + for node in nodes: + if node_job_is_active(node, active_jobs): + continue + result.append(node) + return result diff --git a/teuthology/lock/test/__init__.py b/teuthology/lock/test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/lock/test/test_lock.py b/teuthology/lock/test/test_lock.py new file mode 100644 index 0000000000..5f1679afc3 --- /dev/null +++ b/teuthology/lock/test/test_lock.py @@ -0,0 +1,7 @@ +import teuthology.lock.util + +class TestLock(object): + + def test_locked_since_seconds(self): + node = { "locked_since": "2013-02-07 19:33:55.000000" } + assert teuthology.lock.util.locked_since_seconds(node) > 3600 diff --git a/teuthology/lock/util.py b/teuthology/lock/util.py new file mode 100644 index 0000000000..91f957eab7 --- /dev/null +++ b/teuthology/lock/util.py @@ -0,0 +1,121 @@ +import datetime +import json +import logging + +from teuthology import misc +import 
teuthology.provision.downburst + +log = logging.getLogger(__name__) + + +def vps_version_or_type_valid(machine_type, os_type, os_version): + """ + Check os-type and os-version parameters when locking a vps. + Os-type will always be set (defaults to ubuntu). + + In the case where downburst does not handle list-json (an older version + of downburst, for instance), a message is printed and this checking + is skipped (so that this code should behave as it did before this + check was added). + """ + if not machine_type == 'vps': + return True + if os_type is None or os_version is None: + # we'll use the defaults provided by provision.create_if_vm + # later on during provisioning + return True + valid_os_and_version = \ + teuthology.provision.downburst.get_distro_from_downburst() + if os_type not in valid_os_and_version: + log.error("os-type '%s' is invalid. Try one of: %s", + os_type, + ', '.join(valid_os_and_version.keys())) + return False + if not validate_distro_version(os_version, + valid_os_and_version[os_type]): + log.error( + "os-version '%s' is invalid for os-type '%s'. Try one of: %s", + os_version, + os_type, + ', '.join(valid_os_and_version[os_type])) + return False + return True + + +def validate_distro_version(version, supported_versions): + """ + Return True if the version is valid. For Ubuntu, possible + supported version values are of the form '12.04 (precise)' where + either the number of the version name is acceptable. + """ + if version in supported_versions: + return True + for parts in supported_versions: + part = parts.split('(') + if len(part) == 2: + if version == part[0]: + return True + if version == part[1][0:len(part[1])-1]: + return True + + +def json_matching_statuses(json_file_or_str, statuses): + """ + Filter statuses by json dict in file or fragment; return list of + matching statuses. json_file_or_str must be a file containing + json or json in a string. + """ + try: + open(json_file_or_str, 'r') + except IOError: + query = json.loads(json_file_or_str) + else: + query = json.load(json_file_or_str) + + if not isinstance(query, dict): + raise RuntimeError('--json-query must be a dict') + + return_statuses = list() + for status in statuses: + for k, v in query.items(): + if not misc.is_in_dict(k, v, status): + break + else: + return_statuses.append(status) + + return return_statuses + + +def winnow(statuses, arg, status_key, func=None): + """ + Call with a list of statuses, and the ctx. + 'arg' that you may want to filter by. + If arg is not None, filter statuses by either: + + 1) func=None: filter by status[status_key] == arg + remove any status that fails + + 2) func=: remove any + status for which func returns False + + Return the possibly-smaller set of statuses. 
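    For illustration, with made-up statuses:

        >>> statuses = [{'locked_by': 'alice@host'}, {'locked_by': 'bob@host'}]
        >>> winnow(statuses, 'alice@host', 'locked_by')
        [{'locked_by': 'alice@host'}]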
+ """ + + if arg is not None: + if func: + statuses = [_status for _status in statuses + if func(_status)] + else: + statuses = [_status for _status in statuses + if _status[status_key] == arg] + + return statuses + + +def locked_since_seconds(node): + now = datetime.datetime.now() + since = datetime.datetime.strptime( + node['locked_since'], '%Y-%m-%d %H:%M:%S.%f') + return (now - since).total_seconds() + + diff --git a/teuthology/ls.py b/teuthology/ls.py new file mode 100644 index 0000000000..a50a59d176 --- /dev/null +++ b/teuthology/ls.py @@ -0,0 +1,69 @@ +from __future__ import print_function + +import os +import yaml +import errno +import re + +from teuthology.job_status import get_status + + +def main(args): + return ls(args[""], args["--verbose"]) + + +def ls(archive_dir, verbose): + for j in get_jobs(archive_dir): + job_dir = os.path.join(archive_dir, j) + summary = {} + try: + with open(os.path.join(job_dir, 'summary.yaml')) as f: + g = yaml.safe_load_all(f) + for new in g: + summary.update(new) + except IOError as e: + if e.errno == errno.ENOENT: + print_debug_info(j, job_dir, archive_dir) + continue + else: + raise + + print("{job} {status} {owner} {desc} {duration}s".format( + job=j, + owner=summary.get('owner', '-'), + desc=summary.get('description', '-'), + status=get_status(summary), + duration=int(summary.get('duration', 0)), + )) + if verbose and 'failure_reason' in summary: + print(' {reason}'.format(reason=summary['failure_reason'])) + + +def get_jobs(archive_dir): + dir_contents = os.listdir(archive_dir) + + def is_job_dir(parent, subdir): + if (os.path.isdir(os.path.join(parent, subdir)) and re.match('\d+$', + subdir)): + return True + return False + + jobs = [job for job in dir_contents if is_job_dir(archive_dir, job)] + return sorted(jobs) + + +def print_debug_info(job, job_dir, archive_dir): + print('%s ' % job, end='') + + try: + log_path = os.path.join(archive_dir, job, 'teuthology.log') + if os.path.exists(log_path): + tail = os.popen( + 'tail -1 %s' % log_path + ).read().rstrip() + print(tail, end='') + else: + print('', end='') + except IOError: + pass + print('') diff --git a/teuthology/misc.py b/teuthology/misc.py new file mode 100644 index 0000000000..9f748f39d2 --- /dev/null +++ b/teuthology/misc.py @@ -0,0 +1,1385 @@ +""" +Miscellaneous teuthology functions. +Used by other modules, but mostly called from tasks. 
+""" +import argparse +import os +import logging +import configobj +import getpass +import shutil +import socket +import subprocess +import tarfile +import time +import yaml +import json +import re +from sys import stdin +import pprint +import datetime +from types import MappingProxyType + +from tarfile import ReadError + +from teuthology.util.compat import urljoin, urlopen, HTTPError + +from netaddr.strategy.ipv4 import valid_str as _is_ipv4 +from netaddr.strategy.ipv6 import valid_str as _is_ipv6 +from teuthology import safepath +from teuthology.exceptions import (CommandCrashedError, CommandFailedError, + ConnectionLostError) +from teuthology.orchestra import run +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.orchestra.opsys import DEFAULT_OS_VERSION + + +log = logging.getLogger(__name__) + +stamp = datetime.datetime.now().strftime("%y%m%d%H%M") + +is_arm = lambda x: x.startswith('tala') or x.startswith( + 'ubuntu@tala') or x.startswith('saya') or x.startswith('ubuntu@saya') + +hostname_expr_templ = '(?P.*@)?(?P.*){lab_domain}' + +def host_shortname(hostname): + if _is_ipv4(hostname) or _is_ipv6(hostname): + return hostname + else: + return hostname.split('.', 1)[0] + +def canonicalize_hostname(hostname, user='ubuntu'): + hostname_expr = hostname_expr_templ.format( + lab_domain=config.lab_domain.replace('.', '\.')) + match = re.match(hostname_expr, hostname) + if _is_ipv4(hostname) or _is_ipv6(hostname): + return "%s@%s" % (user, hostname) + if match: + match_d = match.groupdict() + shortname = match_d['shortname'] + if user is None: + user_ = user + else: + user_ = match_d.get('user') or user + else: + shortname = host_shortname(hostname) + user_ = user + + user_at = user_.strip('@') + '@' if user_ else '' + domain = config.lab_domain + if domain and not shortname.endswith('.'): + domain = '.' + domain + ret = '{user_at}{short}{domain}'.format( + user_at=user_at, + short=shortname, + domain=domain, + ) + return ret + + +def decanonicalize_hostname(hostname): + lab_domain = '' + if config.lab_domain: + lab_domain='\.' + config.lab_domain.replace('.', '\.') + hostname_expr = hostname_expr_templ.format(lab_domain=lab_domain) + match = re.match(hostname_expr, hostname) + if match: + hostname = match.groupdict()['shortname'] + return hostname + + +def config_file(string): + """ + Create a config file + + :param string: name of yaml file used for config. + :returns: Dictionary of configuration information. + """ + config_dict = {} + try: + with open(string) as f: + g = yaml.safe_load_all(f) + for new in g: + config_dict.update(new) + except IOError as e: + raise argparse.ArgumentTypeError(str(e)) + return config_dict + + +class MergeConfig(argparse.Action): + """ + Used by scripts to mergeg configurations. (nuke, run, and + schedule, for example) + """ + def __call__(self, parser, namespace, values, option_string=None): + """ + Perform merges of all the day in the config dictionaries. + """ + config_dict = getattr(namespace, self.dest) + for new in values: + deep_merge(config_dict, new) + + +def merge_configs(config_paths): + """ Takes one or many paths to yaml config files and merges them + together, returning the result. 
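    For illustration (the paths are hypothetical):

        conf = merge_configs(['base.yaml', 'overrides.yaml', '-'])

    Later fragments are deep-merged into the accumulated dict; a path of "-"
    reads a YAML fragment from stdin, and paths that do not exist are skipped.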
+ """ + conf_dict = dict() + for conf_path in config_paths: + if conf_path == "-": + partial_dict = yaml.safe_load(stdin) + elif not os.path.exists(conf_path): + log.debug("The config path {0} does not exist, skipping.".format(conf_path)) + continue + else: + with open(conf_path) as partial_file: + partial_dict = yaml.safe_load(partial_file) + try: + conf_dict = deep_merge(conf_dict, partial_dict) + except Exception: + # TODO: Should this log as well? + pprint.pprint("failed to merge {0} into {1}".format(conf_dict, partial_dict)) + raise + + return conf_dict + + +def get_testdir(ctx=None): + """ + :param ctx: Unused; accepted for compatibility + :returns: A test directory + """ + if 'test_path' in config: + return config['test_path'] + return config.get( + 'test_path', + '/home/%s/cephtest' % get_test_user() + ) + + +def get_test_user(ctx=None): + """ + :param ctx: Unused; accepted for compatibility + :returns: str -- the user to run tests as on remote hosts + """ + return config.get('test_user', 'ubuntu') + + +def get_archive_dir(ctx): + """ + :returns: archive directory (a subdirectory of the test directory) + """ + test_dir = get_testdir(ctx) + return os.path.normpath(os.path.join(test_dir, 'archive')) + + +def get_http_log_path(archive_dir, job_id=None): + """ + :param archive_dir: directory to be searched + :param job_id: id of job that terminates the name of the log path + :returns: http log path + """ + http_base = config.archive_server + if not http_base: + return None + + sep = os.path.sep + archive_dir = archive_dir.rstrip(sep) + archive_subdir = archive_dir.split(sep)[-1] + if archive_subdir.endswith(str(job_id)): + archive_subdir = archive_dir.split(sep)[-2] + + if job_id is None: + return os.path.join(http_base, archive_subdir, '') + return os.path.join(http_base, archive_subdir, str(job_id), '') + + +def get_results_url(run_name, job_id=None): + """ + :param run_name: The name of the test run + :param job_id: The job_id of the job. Optional. + :returns: URL to the run (or job, if job_id is passed) in the results web + UI. For example, Inktank uses Pulpito. + """ + if not config.results_ui_server: + return None + base_url = config.results_ui_server + + if job_id is None: + return os.path.join(base_url, run_name, '') + return os.path.join(base_url, run_name, str(job_id), '') + + +def get_ceph_binary_url(package=None, + branch=None, tag=None, sha1=None, dist=None, + flavor=None, format=None, arch=None): + """ + return the url of the ceph binary found on gitbuildder. + """ + BASE = 'http://{host}/{package}-{format}-{dist}-{arch}-{flavor}/'.format( + host=config.gitbuilder_host, + package=package, + flavor=flavor, + arch=arch, + format=format, + dist=dist + ) + + if sha1 is not None: + assert branch is None, "cannot set both sha1 and branch" + assert tag is None, "cannot set both sha1 and tag" + else: + # gitbuilder uses remote-style ref names for branches, mangled to + # have underscores instead of slashes; e.g. 
origin_main + if tag is not None: + ref = tag + assert branch is None, "cannot set both branch and tag" + else: + if branch is None: + branch = 'main' + ref = branch + + sha1_url = urljoin(BASE, 'ref/{ref}/sha1'.format(ref=ref)) + log.debug('Translating ref to sha1 using url %s', sha1_url) + + try: + sha1_fp = urlopen(sha1_url) + sha1 = sha1_fp.read().rstrip('\n') + sha1_fp.close() + except HTTPError as e: + log.error('Failed to get url %s', sha1_url) + raise e + + log.debug('Using %s %s sha1 %s', package, format, sha1) + bindir_url = urljoin(BASE, 'sha1/{sha1}/'.format(sha1=sha1)) + return (sha1, bindir_url) + + +def feed_many_stdins(fp, processes): + """ + :param fp: input file + :param processes: list of processes to be written to. + """ + while True: + data = fp.read(8192) + if not data: + break + for proc in processes: + proc.stdin.write(data) + + +def feed_many_stdins_and_close(fp, processes): + """ + Feed many and then close processes. + + :param fp: input file + :param processes: list of processes to be written to. + """ + feed_many_stdins(fp, processes) + for proc in processes: + proc.stdin.close() + + +def get_mons(roles, ips, + mon_bind_msgr2=False, + mon_bind_addrvec=False): + """ + Get monitors and their associated addresses + """ + mons = {} + mon_ports = {} + mon_id = 0 + is_mon = is_type('mon') + for idx, roles in enumerate(roles): + for role in roles: + if not is_mon(role): + continue + if ips[idx] not in mon_ports: + mon_ports[ips[idx]] = 6789 + else: + mon_ports[ips[idx]] += 1 + if mon_bind_msgr2: + assert mon_bind_addrvec + addr = 'v2:{ip}:{port},v1:{ip}:{port2}'.format( + ip=ips[idx], + port=mon_ports[ips[idx]], + port2=mon_ports[ips[idx]] + 1, + ) + mon_ports[ips[idx]] += 1 + elif mon_bind_addrvec: + addr = 'v1:{ip}:{port}'.format( + ip=ips[idx], + port=mon_ports[ips[idx]], + ) + else: + addr = '{ip}:{port}'.format( + ip=ips[idx], + port=mon_ports[ips[idx]], + ) + mon_id += 1 + mons[role] = addr + assert mons + return mons + + +def skeleton_config(ctx, roles, ips, cluster='ceph', + mon_bind_msgr2=False, + mon_bind_addrvec=False): + """ + Returns a ConfigObj that is prefilled with a skeleton config. + + Use conf[section][key]=value or conf.merge to change it. + + Use conf.write to write it out, override .filename first if you want. + """ + path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template') + conf = configobj.ConfigObj(path, file_error=True) + mons = get_mons(roles=roles, ips=ips, + mon_bind_msgr2=mon_bind_msgr2, + mon_bind_addrvec=mon_bind_addrvec) + for role, addr in mons.items(): + mon_cluster, _, _ = split_role(role) + if mon_cluster != cluster: + continue + name = ceph_role(role) + conf.setdefault(name, {}) + conf[name]['mon addr'] = addr + # set up standby mds's + is_mds = is_type('mds', cluster) + for roles_subset in roles: + for role in roles_subset: + if is_mds(role): + name = ceph_role(role) + conf.setdefault(name, {}) + if '-s-' in name: + standby_mds = name[name.find('-s-') + 3:] + conf[name]['mds standby for name'] = standby_mds + return conf + + +def ceph_role(role): + """ + Return the ceph name for the role, without any cluster prefix, e.g. osd.0. + """ + _, type_, id_ = split_role(role) + return type_ + '.' 
+ id_ + + +def split_role(role): + """ + Return a tuple of cluster, type, and id + If no cluster is included in the role, the default cluster, 'ceph', is used + """ + cluster = 'ceph' + if role.count('.') > 1: + cluster, role = role.split('.', 1) + type_, id_ = role.split('.', 1) + return cluster, type_, id_ + + +def roles_of_type(roles_for_host, type_): + """ + Generator of ids. + + Each call returns the next possible role of the type specified. + :param roles_for_host: list of roles possible + :param type_: type of role + """ + for role in cluster_roles_of_type(roles_for_host, type_, None): + _, _, id_ = split_role(role) + yield id_ + + +def cluster_roles_of_type(roles_for_host, type_, cluster): + """ + Generator of roles. + + Each call returns the next possible role of the type specified. + :param roles_for_host: list of roles possible + :param type_: type of role + :param cluster: cluster name + """ + is_type_in_cluster = is_type(type_, cluster) + for role in roles_for_host: + if not is_type_in_cluster(role): + continue + yield role + + +def all_roles(cluster): + """ + Generator of role values. Each call returns another role. + + :param cluster: Cluster extracted from the ctx. + """ + for _, roles_for_host in cluster.remotes.items(): + for name in roles_for_host: + yield name + + +def all_roles_of_type(cluster, type_): + """ + Generator of role values. Each call returns another role of the + type specified. + + :param cluster: Cluster extracted from the ctx. + :param type_: role type + """ + for _, roles_for_host in cluster.remotes.items(): + for id_ in roles_of_type(roles_for_host, type_): + yield id_ + + +def is_type(type_, cluster=None): + """ + Returns a matcher function for whether role is of type given. + + :param cluster: cluster name to check in matcher (default to no check for cluster) + """ + def _is_type(role): + """ + Return type based on the starting role name. + + If there is more than one period, strip the first part + (ostensibly a cluster name) and check the remainder for the prefix. + """ + role_cluster, role_type, _ = split_role(role) + if cluster is not None and role_cluster != cluster: + return False + return role_type == type_ + return _is_type + + +def num_instances_of_type(cluster, type_, ceph_cluster='ceph'): + """ + Total the number of instances of the role type specified in all remotes. + + :param cluster: Cluster extracted from ctx. + :param type_: role + :param ceph_cluster: filter for ceph cluster name + """ + remotes_and_roles = cluster.remotes.items() + roles = [roles for (remote, roles) in remotes_and_roles] + is_ceph_type = is_type(type_, ceph_cluster) + num = sum(sum(1 for role in hostroles if is_ceph_type(role)) + for hostroles in roles) + return num + + +def create_simple_monmap(ctx, remote, conf, path=None, + mon_bind_addrvec=False): + """ + Writes a simple monmap based on current ceph.conf into path, or + /monmap by default. + + Assumes ceph_conf is up to date. + + Assumes mon sections are named "mon.*", with the dot. + + :return the FSID (as a string) of the newly created monmap + """ + def gen_addresses(): + """ + Monitor address generator. + + Each invocation returns the next monitor address + """ + for section, data in conf.items(): + PREFIX = 'mon.' + if not section.startswith(PREFIX): + continue + name = section[len(PREFIX):] + addr = data['mon addr'] + yield (name, addr) + + addresses = list(gen_addresses()) + assert addresses, "There are no monitors in config!" 
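    # Illustration only (addresses are hypothetical): gen_addresses() yields
    # pairs such as ('a', '172.21.0.1:6789'), which become monmaptool
    # arguments like '--add a 172.21.0.1:6789' (or '--addv' when
    # mon_bind_addrvec is set).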
+ log.debug('Ceph mon addresses: %s', addresses) + + testdir = get_testdir(ctx) + args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'monmaptool', + '--create', + '--clobber', + ] + for (name, addr) in addresses: + if mon_bind_addrvec: + args.extend(('--addv', name, addr)) + else: + args.extend(('--add', name, addr)) + if not path: + path = '{tdir}/monmap'.format(tdir=testdir) + args.extend([ + '--print', + path + ]) + + monmap_output = remote.sh(args) + fsid = re.search("generated fsid (.+)$", + monmap_output, re.MULTILINE).group(1) + return fsid + + +def write_file(remote, path, data): + """ + Write data to a remote file + + :param remote: Remote site. + :param path: Path on the remote being written to. + :param data: Data to be written. + """ + remote.write_file(path, data) + + +def sudo_write_file(remote, path, data, perms=None, owner=None): + """ + Write data to a remote file as super user + + :param remote: Remote site. + :param path: Path on the remote being written to. + :param data: Data to be written. + :param perms: Permissions on the file being written + :param owner: Owner for the file being written + + Both perms and owner are passed directly to chmod. + """ + remote.sudo_write_file(path, data, mode=perms, owner=owner) + + +def copy_file(from_remote, from_path, to_remote, to_path=None): + """ + Copies a file from one remote to another. + """ + if to_path is None: + to_path = from_path + from_remote.run(args=[ + 'sudo', 'scp', '-v', from_path, "{host}:{file}".format( + host=to_remote.name, file=to_path) + ]) + + +def move_file(remote, from_path, to_path, sudo=False, preserve_perms=True): + """ + Move a file from one path to another on a remote site + + If preserve_perms is true, the contents of the destination file (to_path, + which must already exist in this case) are replaced with the contents of the + source file (from_path) and the permissions of to_path are preserved. If + preserve_perms is false, to_path does not need to exist, and is simply + clobbered if it does. + """ + if preserve_perms: + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'stat', + '-c', + '\"%a\"', + to_path + ]) + perms = remote.sh(args).rstrip().strip('\"') + + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'mv', + '--', + from_path, + to_path, + ]) + remote.sh(args) + + if preserve_perms: + # reset the file back to the original permissions + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'chmod', + perms, + to_path, + ]) + remote.sh(args) + + +def delete_file(remote, path, sudo=False, force=False, check=True): + """ + rm a file on a remote site. Use force=True if the call should succeed even + if the file is absent or rm path would otherwise fail. + """ + args = [] + if sudo: + args.append('sudo') + args.extend(['rm']) + if force: + args.extend(['-f']) + args.extend([ + '--', + path, + ]) + remote.sh(args, check_status=check) + + +def remove_lines_from_file(remote, path, line_is_valid_test, + string_to_test_for): + """ + Remove lines from a file. This involves reading the file in, removing + the appropriate lines, saving the file, and then replacing the original + file with the new file. Intermediate files are used to prevent data loss + on when the main site goes up and down. 
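    For illustration (arguments are hypothetical):

        remove_lines_from_file(remote, '/etc/hosts',
                               lambda line, s: s not in line, 'badhost')

    keeps only the lines for which the test returns True, i.e. drops every
    line containing 'badhost'.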
+ """ + # read in the specified file + in_data = remote.read_file(path, False).decode() + out_data = "" + + first_line = True + # use the 'line_is_valid_test' function to remove unwanted lines + for line in in_data.split('\n'): + if line_is_valid_test(line, string_to_test_for): + if not first_line: + out_data += '\n' + else: + first_line = False + + out_data += '{line}'.format(line=line) + + else: + log.info('removing line: {bad_line}'.format(bad_line=line)) + + # get a temp file path on the remote host to write to, + # we don't want to blow away the remote file and then have the + # network drop out + temp_file_path = remote.mktemp() + + # write out the data to a temp file + write_file(remote, temp_file_path, out_data) + + # then do a 'mv' to the actual file location + move_file(remote, temp_file_path, path) + + +def append_lines_to_file(remote, path, lines, sudo=False): + """ + Append lines to a file. + """ + remote.write_file(path, lines, append=True, sudo=sudo) + +def prepend_lines_to_file(remote, path, lines, sudo=False): + """ + Prepend lines to a file. + An intermediate file is used in the same manner as in + Remove_lines_from_list. + """ + + temp_file_path = remote.mktemp() + remote.write_file(temp_file_path, lines) + remote.copy_file(path, temp_file_path, append=True, sudo=sudo) + remote.move_file(temp_file_path, path, sudo=sudo) + + +def create_file(remote, path, data="", permissions=str(644), sudo=False): + """ + Create a file on the remote host. + """ + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'touch', + path, + run.Raw('&&') + ]) + if sudo: + args.append('sudo') + args.extend([ + 'chmod', + permissions, + '--', + path + ]) + remote.sh(args) + # now write out the data if any was passed in + if "" != data: + append_lines_to_file(remote, path, data, sudo) + + +def get_file(remote, path, sudo=False, dest_dir='/tmp'): + """ + Get the contents of a remote file. Do not use for large files; use + Remote.get_file() instead. + """ + local_path = remote.get_file(path, sudo=sudo, dest_dir=dest_dir) + with open(local_path, 'rb') as file_obj: + file_data = file_obj.read() + os.remove(local_path) + return file_data + + +def copy_fileobj(src, tarinfo, local_path): + with open(local_path, 'wb') as dest: + shutil.copyfileobj(src, dest) + + +def pull_directory(remote, remotedir, localdir, write_to=copy_fileobj): + """ + Copy a remote directory to a local directory. + + :param remote: the remote object representing the remote host from where + the specified directory is pulled + :param remotedir: the source directory on remote host + :param localdir: the destination directory on localhost + :param write_to: optional function to write the file to localdir. 
+ its signature should be: + func(src: fileobj, + tarinfo: tarfile.TarInfo, + local_path: str) + """ + log.debug('Transferring archived files from %s:%s to %s', + remote.shortname, remotedir, localdir) + if not os.path.exists(localdir): + os.mkdir(localdir) + r = remote.get_tar_stream(remotedir, sudo=True) + tar = tarfile.open(mode='r|gz', fileobj=r.stdout) + while True: + ti = tar.next() + if ti is None: + break + + if ti.isdir(): + # ignore silently; easier to just create leading dirs below + # XXX this mean empty dirs are not transferred + pass + elif ti.isfile(): + sub = safepath.munge(ti.name) + safepath.makedirs(root=localdir, path=os.path.dirname(sub)) + with tar.extractfile(ti) as src: + write_to(src, ti, os.path.join(localdir, sub)) + else: + if ti.isdev(): + type_ = 'device' + elif ti.issym(): + type_ = 'symlink' + elif ti.islnk(): + type_ = 'hard link' + else: + type_ = 'unknown' + log.info('Ignoring tar entry: %r type %r', ti.name, type_) + + +def pull_directory_tarball(remote, remotedir, localfile): + """ + Copy a remote directory to a local tarball. + """ + log.debug('Transferring archived files from %s:%s to %s', + remote.shortname, remotedir, localfile) + remote.get_tar(remotedir, localfile, sudo=True) + + +def get_wwn_id_map(remote, devs): + log.warning("Entering get_wwn_id_map, a deprecated function that will be removed") + return dict((d, d) for d in devs) + + +def get_scratch_devices(remote): + """ + Read the scratch disk list from remote host + """ + devs = [] + try: + file_data = remote.read_file("/scratch_devs").decode() + devs = file_data.split() + except Exception: + devs = remote.sh('ls /dev/[sv]d?').strip().split('\n') + + # Remove root device (vm guests) from the disk list + for dev in devs: + if 'vda' in dev: + devs.remove(dev) + log.warning("Removing root device: %s from device list" % dev) + + log.debug('devs={d}'.format(d=devs)) + + retval = [] + for dev in devs: + try: + # FIXME: Split this into multiple calls. + remote.run( + args=[ + # node exists + 'stat', + dev, + run.Raw('&&'), + # readable + 'sudo', 'dd', 'if=%s' % dev, 'of=/dev/null', 'count=1', + run.Raw('&&'), + # not mounted + run.Raw('!'), + 'mount', + run.Raw('|'), + 'grep', '-q', dev, + ] + ) + retval.append(dev) + except CommandFailedError: + log.debug("get_scratch_devices: %s is in use" % dev) + return retval + + +def wait_until_healthy(ctx, remote, ceph_cluster='ceph', use_sudo=False): + """ + Wait until a Ceph cluster is healthy. Give up after 15min. 
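    A typical call, for illustration (the remote is hypothetical):

        wait_until_healthy(ctx, mon_remote, ceph_cluster='ceph', use_sudo=True)

    repeatedly runs "ceph health" on that remote until it reports HEALTH_OK.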
+ """ + testdir = get_testdir(ctx) + # when cluster is setup using ceph-deploy or ansible + # access to admin key is readonly for ceph user + cmd = ['ceph', '--cluster', ceph_cluster, 'health'] + if use_sudo: + cmd.insert(0, 'sudo') + args = ['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir)] + args.extend(cmd) + with safe_while(tries=(900 // 6), action="wait_until_healthy") as proceed: + while proceed(): + out = remote.sh(args, logger=log.getChild('health')) + log.debug('Ceph health: %s', out.rstrip('\n')) + if out.split(None, 1)[0] == 'HEALTH_OK': + break + time.sleep(1) + + +def wait_until_osds_up(ctx, cluster, remote, ceph_cluster='ceph'): + """Wait until all Ceph OSDs are booted.""" + num_osds = num_instances_of_type(cluster, 'osd', ceph_cluster) + testdir = get_testdir(ctx) + with safe_while(sleep=6, tries=90) as proceed: + while proceed(): + daemons = ctx.daemons.iter_daemons_of_role('osd', ceph_cluster) + for daemon in daemons: + daemon.check_status() + out = remote.sh( + [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + '--cluster', ceph_cluster, + 'osd', 'dump', '--format=json' + ], + logger=log.getChild('health'), + ) + j = json.loads('\n'.join(out.split('\n')[1:])) + up = sum(1 for o in j['osds'] if 'up' in o['state']) + log.debug('%d of %d OSDs are up' % (up, num_osds)) + if up == num_osds: + break + + +def reboot(node, timeout=300, interval=30): + """ + Reboots a given system, then waits for it to come back up and + re-establishes the ssh connection. + + :param node: The teuthology.orchestra.remote.Remote object of the node + :param timeout: The amount of time, in seconds, after which to give up + waiting for the node to return + :param interval: The amount of time, in seconds, to wait between attempts + to re-establish with the node. This should not be set to + less than maybe 10, to make sure the node actually goes + down first. + """ + log.info("Rebooting {host}...".format(host=node.hostname)) + node.run(args=['sudo', 'shutdown', '-r', 'now']) + reboot_start_time = time.time() + while time.time() - reboot_start_time < timeout: + time.sleep(interval) + if node.is_online or node.reconnect(): + return + raise RuntimeError( + "{host} did not come up after reboot within {time}s".format( + host=node.hostname, time=timeout)) + + +def reconnect(ctx, timeout, remotes=None): + """ + Connect to all the machines in ctx.cluster. + + Presumably, some of them won't be up. Handle this + by waiting for them, unless the wait time exceeds + the specified timeout. + + ctx needs to contain the cluster of machines you + wish it to try and connect to, as well as a config + holding the ssh keys for each of them. As long as it + contains this data, you can construct a context + that is a subset of your full cluster. + """ + log.info('Re-opening connections...') + starttime = time.time() + + if remotes: + need_reconnect = remotes + else: + need_reconnect = list(ctx.cluster.remotes.keys()) + + while need_reconnect: + for remote in need_reconnect: + log.info('trying to connect to %s', remote.name) + success = remote.reconnect() + if not success: + if time.time() - starttime > timeout: + raise RuntimeError("Could not reconnect to %s" % + remote.name) + else: + need_reconnect.remove(remote) + + log.debug('waited {elapsed}'.format( + elapsed=str(time.time() - starttime))) + time.sleep(1) + + +def get_clients(ctx, roles): + """ + return all remote roles that are clients. 
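    For illustration (the roles are hypothetical):

        for id_, remote in get_clients(ctx, ['client.0', 'client.1']):
            print(id_)   # '0', then '1'

    Each item yielded is an (id, remote) pair for one client role.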
+ """ + for role in roles: + assert isinstance(role, str) + assert 'client.' in role + _, _, id_ = split_role(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + yield (id_, remote) + + +def get_user(): + """ + Return the username in the format user@host. + """ + return getpass.getuser() + '@' + socket.gethostname() + + +def get_mon_names(ctx, cluster='ceph'): + """ + :returns: a list of monitor names + """ + is_mon = is_type('mon', cluster) + host_mons = [[role for role in roles if is_mon(role)] + for roles in ctx.cluster.remotes.values()] + return [mon for mons in host_mons for mon in mons] + + +def get_first_mon(ctx, config, cluster='ceph'): + """ + return the "first" mon role (alphanumerically, for lack of anything better) + """ + mons = get_mon_names(ctx, cluster) + if mons: + return sorted(mons)[0] + assert False, 'no mon for cluster found' + + +def replace_all_with_clients(cluster, config): + """ + Converts a dict containing a key all to one + mapping all clients to the value of config['all'] + """ + assert isinstance(config, dict), 'config must be a dict' + if 'all' not in config: + return config + norm_config = {} + assert len(config) == 1, \ + "config cannot have 'all' and specific clients listed" + for client in all_roles_of_type(cluster, 'client'): + norm_config['client.{id}'.format(id=client)] = config['all'] + return norm_config + + +def deep_merge(a, b): + """ + Deep Merge. If a and b are both lists, all elements in b are + added into a. If a and b are both dictionaries, elements in b are + recursively added to a. + :param a: object items will be merged into + :param b: object items will be merged from + """ + if b is None: + return a + elif isinstance(a, list): + assert isinstance(b, list) + a.extend(b) + return a + elif isinstance(a, dict): + assert isinstance(b, dict) or isinstance(b, MappingProxyType) + for (k, v) in b.items(): + a[k] = deep_merge(a.get(k), v) + return a + elif isinstance(b, dict) or isinstance(b, list): + return deep_merge(b.__class__(), b) + elif isinstance(b, MappingProxyType): + return deep_merge(dict(), b) + else: + return b + + +def get_valgrind_args(testdir, name, preamble, v, exit_on_first_error=True): + """ + Build a command line for running valgrind. 
+ + testdir - test results directory + name - name of daemon (for naming hte log file) + preamble - stuff we should run before valgrind + v - valgrind arguments + """ + if v is None: + return preamble + if not isinstance(v, list): + v = [v] + + # https://tracker.ceph.com/issues/44362 + preamble.extend([ + 'env', 'OPENSSL_ia32cap=~0x1000000000000000', + ]) + + val_path = '/var/log/ceph/valgrind' + if '--tool=memcheck' in v or '--tool=helgrind' in v: + extra_args = [ + 'valgrind', + '--trace-children=no', + '--child-silent-after-fork=yes', + '--soname-synonyms=somalloc=*tcmalloc*', + '--num-callers=50', + '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir), + '--xml=yes', + '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name), + '--time-stamp=yes', + '--vgdb=yes', + ] + else: + extra_args = [ + 'valgrind', + '--trace-children=no', + '--child-silent-after-fork=yes', + '--soname-synonyms=somalloc=*tcmalloc*', + '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir), + '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name), + '--time-stamp=yes', + '--vgdb=yes', + ] + if exit_on_first_error: + extra_args.extend([ + # at least Valgrind 3.14 is required + '--exit-on-first-error=yes', + '--error-exitcode=42', + ]) + args = [ + 'cd', testdir, + run.Raw('&&'), + ] + preamble + extra_args + v + log.debug('running %s under valgrind with args %s', name, args) + return args + + +def ssh_keyscan(hostnames, _raise=True): + """ + Fetch the SSH public key of one or more hosts + + :param hostnames: A list of hostnames, or a dict keyed by hostname + :param _raise: Whether to raise an exception if not all keys are retrieved + :returns: A dict keyed by hostname, with the host keys as values + """ + if not isinstance(hostnames, list) and not isinstance(hostnames, dict): + raise TypeError("'hostnames' must be a list") + hostnames = [canonicalize_hostname(name, user=None) for name in + hostnames] + keys_dict = dict() + for hostname in hostnames: + with safe_while( + sleep=1, + tries=5 if _raise else 1, + _raise=_raise, + action="ssh_keyscan " + hostname, + ) as proceed: + while proceed(): + key = _ssh_keyscan(hostname) + if key: + keys_dict[hostname] = key + break + if len(keys_dict) != len(hostnames): + missing = set(hostnames) - set(keys_dict.keys()) + msg = "Unable to scan these host keys: %s" % ' '.join(missing) + if not _raise: + log.warning(msg) + else: + raise RuntimeError(msg) + return keys_dict + + +def _ssh_keyscan(hostname): + """ + Fetch the SSH public key of one or more hosts + + :param hostname: The hostname + :returns: The host key + """ + args = ['ssh-keyscan', '-T', '1', '-t', 'rsa', hostname] + p = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + p.wait() + for line in p.stderr: + line = line.decode() + line = line.strip() + if line and not line.startswith('#'): + log.error(line) + for line in p.stdout: + host, key = line.strip().decode().split(' ', 1) + return key + + +def ssh_keyscan_wait(hostname): + """ + Run ssh-keyscan against a host, return True if it succeeds, + False otherwise. Try again if ssh-keyscan timesout. 
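+
+ Illustrative use (the host name is made up, not from this change):
+
+ if not ssh_keyscan_wait('node1.example.com'):
+ raise RuntimeError('node1 never answered ssh-keyscan')
+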
+ :param hostname: on which ssh-keyscan is run + """ + with safe_while(sleep=6, tries=100, _raise=False, + action="ssh_keyscan_wait " + hostname) as proceed: + success = False + while proceed(): + key = _ssh_keyscan(hostname) + if key: + success = True + break + log.info("try ssh_keyscan again for " + str(hostname)) + return success + +def stop_daemons_of_type(ctx, type_, cluster='ceph'): + """ + :param type_: type of daemons to be stopped. + """ + log.info('Shutting down %s daemons...' % type_) + exc = None + for daemon in ctx.daemons.iter_daemons_of_role(type_, cluster): + try: + daemon.stop() + except (CommandFailedError, + CommandCrashedError, + ConnectionLostError) as e: + exc = e + log.exception('Saw exception from %s.%s', daemon.role, daemon.id_) + if exc is not None: + raise exc + + +def get_system_type(remote, distro=False, version=False): + """ + If distro, return distro. + If version, return version (lsb_release -rs) + If both, return both. + If neither, return 'deb' or 'rpm' if distro is known to be one of those + Finally, if unknown, return the unfiltered distro (from lsb_release -is) + """ + system_value = remote.sh('sudo lsb_release -is').strip() + log.debug("System to be installed: %s" % system_value) + if version: + version = remote.sh('sudo lsb_release -rs').strip() + if distro and version: + return system_value.lower(), version + if distro: + return system_value.lower() + if version: + return version + if system_value in ['Ubuntu', 'Debian']: + return "deb" + if system_value in ['CentOS', 'Fedora', 'RedHatEnterpriseServer', + 'RedHatEnterprise', + 'CentOSStream', + 'openSUSE', 'openSUSE project', 'SUSE', 'SUSE LINUX']: + return "rpm" + return system_value + +def get_pkg_type(os_type): + if os_type in ('centos', 'fedora', 'opensuse', 'rhel', 'sle'): + return 'rpm' + else: + return 'deb' + +def get_distro(ctx): + """ + Get the name of the distro that we are using (usually the os_type). + """ + os_type = None + if ctx.os_type: + return ctx.os_type + + try: + os_type = ctx.config.get('os_type', None) + except AttributeError: + pass + + # if os_type is None, return the default of ubuntu + return os_type or "ubuntu" + + +def get_distro_version(ctx): + """ + Get the verstion of the distro that we are using (release number). + """ + distro = get_distro(ctx) + if ctx.os_version is not None: + return str(ctx.os_version) + try: + os_version = ctx.config.get('os_version', DEFAULT_OS_VERSION[distro]) + except AttributeError: + os_version = DEFAULT_OS_VERSION[distro] + return str(os_version) + + +def get_multi_machine_types(machinetype): + """ + Converts machine type string to list based on common deliminators + """ + machinetypes = [] + machine_type_deliminator = [',', ' ', '\t'] + for deliminator in machine_type_deliminator: + if deliminator in machinetype: + machinetypes = machinetype.split(deliminator) + break + if not machinetypes: + machinetypes.append(machinetype) + return machinetypes + + +def is_in_dict(searchkey, searchval, d): + """ + Test if searchkey/searchval are in dictionary. searchval may + itself be a dict, in which case, recurse. searchval may be + a subset at any nesting level (that is, all subkeys in searchval + must be found in d at the same level/nest position, but searchval + is not required to fully comprise d[searchkey]). + + >>> is_in_dict('a', 'foo', {'a':'foo', 'b':'bar'}) + True + + >>> is_in_dict( + ... 'a', + ... {'sub1':'key1', 'sub2':'key2'}, + ... {'a':{'sub1':'key1', 'sub2':'key2', 'sub3':'key3'}} + ... 
) + True + + >>> is_in_dict('a', 'foo', {'a':'bar', 'b':'foo'}) + False + + >>> is_in_dict('a', 'foo', {'a':{'a': 'foo'}}) + False + """ + val = d.get(searchkey, None) + if isinstance(val, dict) and isinstance(searchval, dict): + for foundkey, foundval in searchval.items(): + if not is_in_dict(foundkey, foundval, val): + return False + return True + else: + return searchval == val + + +def sh(command, log_limit=1024, cwd=None, env=None): + """ + Run the shell command and return the output in ascii (stderr and + stdout). If the command fails, raise an exception. The command + and its output are logged, on success and on error. + """ + log.debug(":sh: " + command) + proc = subprocess.Popen( + args=command, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + shell=True, + bufsize=1) + lines = [] + truncated = False + with proc.stdout: + for line in proc.stdout: + line = line.decode() + lines.append(line) + line = line.rstrip() + if len(line) > log_limit: + truncated = True + log.debug(line[:log_limit] + + "... (truncated to the first " + str(log_limit) + + " characters)") + else: + log.debug(line) + output = "".join(lines) + if proc.wait() != 0: + if truncated: + log.error(command + " replay full stdout/stderr" + " because an error occurred and some of" + " it was truncated") + log.error(output) + raise subprocess.CalledProcessError( + returncode=proc.returncode, + cmd=command, + output=output + ) + return output + + +def add_remote_path(ctx, local_dir, remote_dir): + """ + Add key/value pair (local_dir: remote_dir) to job's info.yaml. + These key/value pairs are read to archive them in case of job timeout. + """ + if ctx.archive is None: + return + with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file: + info_yaml = yaml.safe_load(info_file) + info_file.seek(0) + if 'archive' in info_yaml: + info_yaml['archive'][local_dir] = remote_dir + else: + info_yaml['archive'] = {local_dir: remote_dir} + yaml.safe_dump(info_yaml, info_file, default_flow_style=False) + + +def archive_logs(ctx, remote_path, log_path): + """ + Archive directories from all nodes in a cliuster. It pulls all files in + remote_path dir to job's archive dir under log_path dir. + """ + if ctx.archive is None: + return + path = os.path.join(ctx.archive, 'remote') + os.makedirs(path, exist_ok=True) + for remote in ctx.cluster.remotes.keys(): + sub = os.path.join(path, remote.shortname) + os.makedirs(sub, exist_ok=True) + try: + pull_directory(remote, remote_path, os.path.join(sub, log_path)) + except ReadError: + pass + + +def compress_logs(ctx, remote_dir): + """ + Compress all files in remote_dir from all nodes in a cluster. 
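+
+ Illustrative call (the path is an example; callers pass whatever remote
+ log directory they want compressed before archiving):
+
+ compress_logs(ctx, '/var/log/ceph')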
+ """ + log.info('Compressing logs...') + run.wait( + ctx.cluster.run( + args=(f"sudo find {remote_dir} -name *.log -print0 | " + f"sudo xargs -0 --no-run-if-empty -- gzip --"), + wait=False, + ), + ) diff --git a/teuthology/nuke/__init__.py b/teuthology/nuke/__init__.py new file mode 100644 index 0000000000..3ec78bb824 --- /dev/null +++ b/teuthology/nuke/__init__.py @@ -0,0 +1,366 @@ +import argparse +import datetime +import json +import logging +import os +import subprocess + +import yaml + +import teuthology +from teuthology import provision +from teuthology.lock.ops import unlock_one +from teuthology.lock.query import is_vm, list_locks, \ + find_stale_locks, get_status +from teuthology.lock.util import locked_since_seconds +from teuthology.nuke.actions import ( + check_console, clear_firewall, shutdown_daemons, remove_installed_packages, + reboot, remove_osd_mounts, remove_osd_tmpfs, kill_hadoop, + remove_ceph_packages, synch_clocks, unlock_firmware_repo, + remove_configuration_files, undo_multipath, reset_syslog_dir, + remove_ceph_data, remove_testing_tree, remove_yum_timedhosts, + kill_valgrind, +) +from teuthology.config import config, FakeNamespace +from teuthology.misc import ( + canonicalize_hostname, config_file, decanonicalize_hostname, merge_configs, + get_user, sh +) +from teuthology.openstack import OpenStack, OpenStackInstance, enforce_json_dictionary +from teuthology.orchestra.remote import Remote +from teuthology.parallel import parallel +from teuthology.task.internal import check_lock, add_remotes, connect + +log = logging.getLogger(__name__) + + +def openstack_volume_id(volume): + return (volume.get('ID') or volume['id']) + + +def openstack_volume_name(volume): + return (volume.get('Display Name') or + volume.get('display_name') or + volume.get('Name') or + volume.get('name') or "") + + +def stale_openstack(ctx): + targets = dict(map(lambda i: (i['ID'], i), + OpenStack.list_instances())) + nodes = list_locks(keyed_by_name=True, locked=True) + stale_openstack_instances(ctx, targets, nodes) + stale_openstack_nodes(ctx, targets, nodes) + stale_openstack_volumes(ctx, OpenStack.list_volumes()) + if not ctx.dry_run: + openstack_remove_again() + +# +# A delay, in seconds, that is significantly longer than +# any kind of OpenStack server creation / deletion / etc. 
+# +OPENSTACK_DELAY = 30 * 60 + + +def stale_openstack_instances(ctx, instances, locked_nodes): + for (instance_id, instance) in instances.items(): + i = OpenStackInstance(instance_id) + if not i.exists(): + log.debug("stale-openstack: {instance} disappeared, ignored" + .format(instance=instance_id)) + continue + if (i.get_created() > + config['max_job_time'] + OPENSTACK_DELAY): + log.info( + "stale-openstack: destroying instance {instance}" + " because it was created {created} seconds ago" + " which is older than" + " max_job_time {max_job_time} + {delay}" + .format(instance=i['name'], + created=i.get_created(), + max_job_time=config['max_job_time'], + delay=OPENSTACK_DELAY)) + if not ctx.dry_run: + i.destroy() + continue + name = canonicalize_hostname(i['name'], user=None) + if i.get_created() > OPENSTACK_DELAY and name not in locked_nodes: + log.info("stale-openstack: destroying instance {instance}" + " because it was created {created} seconds ago" + " is older than {delay}s and it is not locked" + .format(instance=i['name'], + created=i.get_created(), + delay=OPENSTACK_DELAY)) + if not ctx.dry_run: + i.destroy() + continue + log.debug("stale-openstack: instance " + i['name'] + " OK") + + +def openstack_delete_volume(id): + OpenStack().run("volume delete " + id + " || true") + + +def stale_openstack_volumes(ctx, volumes): + now = datetime.datetime.now() + for volume in volumes: + volume_id = openstack_volume_id(volume) + try: + volume = json.loads(OpenStack().run("volume show -f json " + + volume_id)) + except subprocess.CalledProcessError: + log.debug("stale-openstack: {id} disappeared, ignored" + .format(id=volume_id)) + continue + volume_name = openstack_volume_name(volume) + enforce_json_dictionary(volume) + created_at = datetime.datetime.strptime( + volume['created_at'], '%Y-%m-%dT%H:%M:%S.%f') + created = (now - created_at).total_seconds() + if created > config['max_job_time'] + OPENSTACK_DELAY: + log.info( + "stale-openstack: destroying volume {volume}({id})" + " because it was created {created} seconds ago" + " which is older than" + " max_job_time {max_job_time} + {delay}" + .format(volume=volume_name, + id=volume_id, + created=created, + max_job_time=config['max_job_time'], + delay=OPENSTACK_DELAY)) + if not ctx.dry_run: + openstack_delete_volume(volume_id) + continue + log.debug("stale-openstack: volume " + volume_id + " OK") + + +def stale_openstack_nodes(ctx, instances, locked_nodes): + names = set([ i['Name'] for i in instances.values() ]) + for (name, node) in locked_nodes.items(): + name = decanonicalize_hostname(name) + if node['machine_type'] != 'openstack': + continue + if (name not in names and + locked_since_seconds(node) > OPENSTACK_DELAY): + log.info("stale-openstack: unlocking node {name} unlocked" + " because it was created {created}" + " seconds ago which is older than {delay}" + " and it has no instance" + .format(name=name, + created=locked_since_seconds(node), + delay=OPENSTACK_DELAY)) + if not ctx.dry_run: + unlock_one(ctx, name, node['locked_by']) + continue + log.debug("stale-openstack: node " + name + " OK") + + +def openstack_remove_again(): + """ + Volumes and servers with REMOVE-ME in the name are leftover + that failed to be removed. It is not uncommon for a failed removal + to succeed later on. 
+ """ + sh(""" + openstack server list --name REMOVE-ME --column ID --format value | + xargs --no-run-if-empty --max-args 1 -P20 openstack server delete --wait + true + """) + volumes = json.loads(OpenStack().run("volume list -f json --long")) + remove_me = [openstack_volume_id(v) for v in volumes + if 'REMOVE-ME' in openstack_volume_name(v)] + for i in remove_me: + log.info("Trying to remove stale volume %s" % i) + openstack_delete_volume(i) + + +def main(args): + ctx = FakeNamespace(args) + if ctx.verbose: + teuthology.log.setLevel(logging.DEBUG) + + info = {} + if ctx.archive: + ctx.config = config_file(ctx.archive + '/config.yaml') + ifn = os.path.join(ctx.archive, 'info.yaml') + if os.path.exists(ifn): + with open(ifn, 'r') as fd: + info = yaml.safe_load(fd.read()) + if not ctx.pid: + ctx.pid = info.get('pid') + if not ctx.pid: + ctx.pid = int(open(ctx.archive + '/pid').read().rstrip('\n')) + if not ctx.owner: + ctx.owner = info.get('owner') + if not ctx.owner: + ctx.owner = open(ctx.archive + '/owner').read().rstrip('\n') + + if ctx.targets: + ctx.config = merge_configs(ctx.targets) + + if ctx.stale: + stale_nodes = find_stale_locks(ctx.owner) + targets = dict() + for node in stale_nodes: + targets[node['name']] = node['ssh_pub_key'] + ctx.config = dict(targets=targets) + + if ctx.stale_openstack: + stale_openstack(ctx) + return + + log.info( + '\n '.join( + ['targets:', ] + yaml.safe_dump( + ctx.config['targets'], + default_flow_style=False).splitlines())) + + if ctx.dry_run: + log.info("Not actually nuking anything since --dry-run was passed") + return + + if ctx.owner is None: + ctx.owner = get_user() + + if ctx.pid: + if ctx.archive: + log.info('Killing teuthology process at pid %d', ctx.pid) + os.system('grep -q %s /proc/%d/cmdline && sudo kill -9 %d' % ( + ctx.archive, + ctx.pid, + ctx.pid)) + else: + subprocess.check_call(["kill", "-9", str(ctx.pid)]) + + nuke(ctx, ctx.unlock, ctx.synch_clocks, ctx.noipmi, ctx.keep_logs, not ctx.no_reboot) + + +def nuke(ctx, should_unlock, sync_clocks=True, noipmi=False, keep_logs=False, should_reboot=True): + if 'targets' not in ctx.config: + return + total_unnuked = {} + log.info('Checking targets against current locks') + with parallel() as p: + for target, hostkey in ctx.config['targets'].items(): + status = get_status(target) + if ctx.name and ctx.name not in status['description']: + total_unnuked[target] = hostkey + log.info( + f"Not nuking {target} because description doesn't match: " + f"{ctx.name} != {status['description']}" + ) + continue + elif status.get('up') is False: + total_unnuked[target] = hostkey + log.info(f"Not nuking {target} because it is down") + continue + p.spawn( + nuke_one, + ctx, + {target: hostkey}, + should_unlock, + sync_clocks, + ctx.config.get('check-locks', True), + noipmi, + keep_logs, + should_reboot, + ) + for unnuked in p: + if unnuked: + total_unnuked.update(unnuked) + if total_unnuked: + log.error('Could not nuke the following targets:\n' + + '\n '.join(['targets:', ] + + yaml.safe_dump( + total_unnuked, + default_flow_style=False).splitlines())) + + +def nuke_one(ctx, target, should_unlock, synch_clocks, + check_locks, noipmi, keep_logs, should_reboot): + ret = None + ctx = argparse.Namespace( + config=dict(targets=target), + owner=ctx.owner, + check_locks=check_locks, + synch_clocks=synch_clocks, + teuthology_config=config.to_dict(), + name=ctx.name, + noipmi=noipmi, + ) + try: + nuke_helper(ctx, should_unlock, keep_logs, should_reboot) + except Exception: + log.exception('Could not nuke %s' % target) 
+ # not re-raising so that parallel calls aren't killed
+ ret = target
+ else:
+ if should_unlock:
+ unlock_one(ctx, list(target.keys())[0], ctx.owner)
+ return ret
+
+
+def nuke_helper(ctx, should_unlock, keep_logs, should_reboot):
+ # ensure node is up with ipmi
+ (target,) = ctx.config['targets'].keys()
+ host = target.split('@')[-1]
+ shortname = host.split('.')[0]
+ if should_unlock:
+ if is_vm(shortname):
+ return
+ log.debug('shortname: %s' % shortname)
+ remote = Remote(host)
+ if ctx.check_locks:
+ # do not check whether the node is 'up';
+ # we want to be able to nuke a downed node
+ check_lock.check_lock(ctx, None, check_up=False)
+ status = get_status(host)
+ if status['machine_type'] in provision.fog.get_types():
+ remote.console.power_off()
+ return
+ elif status['machine_type'] in provision.pelagos.get_types():
+ provision.pelagos.park_node(host)
+ return
+ elif remote.is_container:
+ remote.run(
+ args=['sudo', '/testnode_stop.sh'],
+ check_status=False,
+ )
+ return
+ if (not ctx.noipmi and 'ipmi_user' in config and
+ 'vpm' not in shortname):
+ try:
+ check_console(host)
+ except Exception:
+ log.exception('')
+ log.info("Will attempt to connect via SSH")
+ remote = Remote(host)
+ remote.connect()
+ add_remotes(ctx, None)
+ connect(ctx, None)
+ clear_firewall(ctx)
+ shutdown_daemons(ctx)
+ kill_valgrind(ctx)
+ # Try to remove packages before reboot
+ remove_installed_packages(ctx)
+ remotes = ctx.cluster.remotes.keys()
+ if should_reboot:
+ reboot(ctx, remotes)
+ # shut down daemons again in case they restarted after the reboot
+ shutdown_daemons(ctx)
+ remove_osd_mounts(ctx)
+ remove_osd_tmpfs(ctx)
+ kill_hadoop(ctx)
+ remove_ceph_packages(ctx)
+ synch_clocks(remotes)
+ unlock_firmware_repo(ctx)
+ remove_configuration_files(ctx)
+ undo_multipath(ctx)
+ reset_syslog_dir(ctx)
+ remove_ceph_data(ctx)
+ if not keep_logs:
+ remove_testing_tree(ctx)
+ remove_yum_timedhosts(ctx)
+ # Once again remove packages after reboot
+ remove_installed_packages(ctx)
+ log.info('Installed packages removed.')
diff --git a/teuthology/nuke/actions.py b/teuthology/nuke/actions.py
new file mode 100644
index 0000000000..854ca27d48
--- /dev/null
+++ b/teuthology/nuke/actions.py
@@ -0,0 +1,460 @@
+import logging
+import time
+
+from teuthology.misc import get_testdir, reconnect
+from teuthology.orchestra import run
+from teuthology.orchestra.remote import Remote
+from teuthology.task import install as install_task
+
+
+log = logging.getLogger(__name__)
+
+
+def clear_firewall(ctx):
+ """
+ Remove any iptables rules created by teuthology. These rules are
+ identified by containing a comment with 'teuthology' in it. Non-teuthology
+ firewall rules are unaffected.
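+
+ Illustrative example (made-up rule, not from this change): a rule added
+ with "iptables -A INPUT -p tcp --dport 6800 -m comment --comment
+ 'teuthology' -j REJECT" contains the word 'teuthology', so the
+ 'grep -v teuthology' step filters it out of the saved rule set and
+ iptables-restore reloads the rules without it.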
+ """ + log.info("Clearing teuthology firewall rules...") + ctx.cluster.run( + args=[ + "sudo", "sh", "-c", + "iptables-save | grep -v teuthology | iptables-restore" + ], + ) + log.info("Cleared teuthology firewall rules.") + + +def shutdown_daemons(ctx): + log.info('Unmounting ceph-fuse and killing daemons...') + ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'), + 'sudo', 'service', 'ceph', 'stop', run.Raw('||'), + 'sudo', 'systemctl', 'stop', 'ceph.target'], + check_status=False, timeout=180) + ctx.cluster.run( + args=[ + 'if', 'grep', '-q', 'ceph-fuse', '/etc/mtab', run.Raw(';'), + 'then', + 'grep', 'ceph-fuse', '/etc/mtab', run.Raw('|'), + 'grep', '-o', " /.* fuse", run.Raw('|'), + 'grep', '-o', "/.* ", run.Raw('|'), + 'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'), + 'fi', + run.Raw(';'), + 'if', 'grep', '-q', 'rbd-fuse', '/etc/mtab', run.Raw(';'), + 'then', + 'grep', 'rbd-fuse', '/etc/mtab', run.Raw('|'), + 'grep', '-o', " /.* fuse", run.Raw('|'), + 'grep', '-o', "/.* ", run.Raw('|'), + 'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'), + 'fi', + run.Raw(';'), + 'sudo', + 'killall', + '--quiet', + 'ceph-mon', + 'ceph-osd', + 'ceph-mds', + 'ceph-mgr', + 'ceph-fuse', + 'ceph-disk', + 'radosgw', + 'ceph_test_rados', + 'rados', + 'rbd-fuse', + 'apache2', + run.Raw('||'), + 'true', # ignore errors from ceph binaries not being found + ], + timeout=120, + ) + log.info('All daemons killed.') + + +def kill_hadoop(ctx): + log.info("Terminating Hadoop services...") + ctx.cluster.run(args=[ + "pkill", "-f", "-KILL", "java.*hadoop", + ], + check_status=False, + timeout=60 + ) + + +def kill_valgrind(ctx): + # http://tracker.ceph.com/issues/17084 + ctx.cluster.run( + args=['sudo', 'pkill', '-f', '-9', 'valgrind.bin'], + check_status=False, + timeout=20, + ) + + +def remove_osd_mounts(ctx): + """ + unmount any osd data mounts (scratch disks) + """ + log.info('Unmount any osd data directories...') + ctx.cluster.run( + args=[ + 'grep', + '/var/lib/ceph/osd/', + '/etc/mtab', + run.Raw('|'), + 'awk', '{print $2}', run.Raw('|'), + 'xargs', '-r', + 'sudo', 'umount', '-l', run.Raw(';'), + 'true' + ], + timeout=120 + ) + + +def remove_osd_tmpfs(ctx): + """ + unmount tmpfs mounts + """ + log.info('Unmount any osd tmpfs dirs...') + ctx.cluster.run( + args=[ + 'egrep', 'tmpfs\s+/mnt', '/etc/mtab', run.Raw('|'), + 'awk', '{print $2}', run.Raw('|'), + 'xargs', '-r', + 'sudo', 'umount', run.Raw(';'), + 'true' + ], + timeout=120 + ) + + +def stale_kernel_mount(remote): + proc = remote.run( + args=[ + 'sudo', 'find', + '/sys/kernel/debug/ceph', + '-mindepth', '1', + run.Raw('!'), + '-path', '/sys/kernel/debug/ceph/meta', + run.Raw('!'), + '-path', '/sys/kernel/debug/ceph/meta/client_features', + '-type', 'd', + run.Raw('|'), + 'read' + ], + check_status=False + ) + return proc.exitstatus == 0 + + +def reboot(ctx, remotes): + for remote in remotes: + if stale_kernel_mount(remote): + log.warning('Stale kernel mount on %s!', remote.name) + log.info('force/no-sync rebooting %s', remote.name) + # -n is ignored in systemd versions through v229, which means this + # only works on trusty -- on 7.3 (v219) and xenial (v229) reboot -n + # still calls sync(). 
+ # args = ['sync', run.Raw('&'), + # 'sleep', '5', run.Raw(';'), + # 'sudo', 'reboot', '-f', '-n'] + args = ['for', 'sysrq', 'in', 's', 'u', 'b', run.Raw(';'), + 'do', 'echo', run.Raw('$sysrq'), run.Raw('|'), + 'sudo', 'tee', '/proc/sysrq-trigger', run.Raw(';'), + 'done'] + else: + log.info('rebooting %s', remote.name) + args = ['sudo', 'reboot'] + try: + remote.run(args=args, wait=False) + except Exception: + log.exception('ignoring exception during reboot command') + # we just ignore these procs because reboot -f doesn't actually + # send anything back to the ssh client! + if remotes: + log.info('waiting for nodes to reboot') + time.sleep(8) # if we try and reconnect too quickly, it succeeds! + reconnect(ctx, 480) # allow 8 minutes for the reboots + + +def reset_syslog_dir(ctx): + log.info('Resetting syslog output locations...') + nodes = {} + for remote in ctx.cluster.remotes.keys(): + proc = remote.run( + args=[ + 'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf', + run.Raw(';'), + 'then', + 'sudo', 'rm', '-f', '--', '/etc/rsyslog.d/80-cephtest.conf', + run.Raw('&&'), + 'sudo', 'service', 'rsyslog', 'restart', + run.Raw(';'), + 'fi', + run.Raw(';'), + ], + timeout=60, + ) + nodes[remote.name] = proc + + for name, proc in nodes.items(): + log.info('Waiting for %s to restart syslog...', name) + proc.wait() + + +def dpkg_configure(ctx): + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type != 'deb': + continue + log.info( + 'Waiting for dpkg --configure -a and apt-get -f install...') + remote.run( + args=[ + 'sudo', 'dpkg', '--configure', '-a', + run.Raw(';'), + 'sudo', 'DEBIAN_FRONTEND=noninteractive', + 'apt-get', '-y', '--force-yes', '-f', 'install', + run.Raw('||'), + ':', + ], + timeout=180, + check_status=False, + ) + + +def remove_yum_timedhosts(ctx): + # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329 + log.info("Removing yum timedhosts files...") + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type != 'rpm': + continue + remote.run( + args="sudo find /var/cache/yum -name 'timedhosts' -exec rm {} \;", + check_status=False, timeout=180 + ) + + +def remove_ceph_packages(ctx): + """ + remove ceph and ceph dependent packages by force + force is needed since the node's repo might have changed and + in many cases autocorrect will not work due to missing packages + due to repo changes + """ + log.info("Force remove ceph packages") + ceph_packages_to_remove = ['ceph-common', 'ceph-mon', 'ceph-osd', + 'libcephfs1', 'libcephfs2', + 'librados2', 'librgw2', 'librbd1', 'python-rgw', + 'ceph-selinux', 'python-cephfs', 'ceph-base', + 'python-rbd', 'python-rados', 'ceph-mds', + 'ceph-mgr', 'libcephfs-java', 'libcephfs-jni', + 'ceph-deploy', 'libapache2-mod-fastcgi' + ] + pkgs = str.join(' ', ceph_packages_to_remove) + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type == 'rpm': + log.info("Remove any broken repos") + dist_release = remote.os.name + remote.run( + args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*ceph*")], + check_status=False + ) + remote.run( + args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*fcgi*")], + check_status=False, + ) + remote.run( + args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*samba*")], + check_status=False, + ) + remote.run( + args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*nfs-ganesha*")], + check_status=False, + ) + remote.run( + args=['sudo', 'rpm', '--rebuilddb'] + ) + if dist_release in ['opensuse', 'sle']: + remote.sh('sudo zypper clean') + log.info('Remove any ceph packages') + 
remote.sh('sudo zypper remove --non-interactive', + check_status=False + ) + else: + remote.sh('sudo yum clean all') + log.info('Remove any ceph packages') + remote.sh('sudo yum remove -y', check_status=False) + else: + log.info("Remove any broken repos") + remote.run( + args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*ceph*")], + check_status=False, + ) + remote.run( + args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*samba*")], + check_status=False, + ) + remote.run( + args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*nfs-ganesha*")], + check_status=False, + ) + log.info("Autoclean") + remote.run( + args=['sudo', 'apt-get', 'autoclean'], + check_status=False, + ) + log.info('Remove any ceph packages') + remote.run( + args=[ + 'sudo', 'dpkg', '--remove', '--force-remove-reinstreq', + run.Raw(pkgs) + ], + check_status=False + ) + log.info("Autoclean") + remote.run( + args=['sudo', 'apt-get', 'autoclean'] + ) + + +def remove_installed_packages(ctx): + dpkg_configure(ctx) + conf = dict( + project='ceph', + debuginfo='true', + ) + packages = install_task.get_package_list(ctx, conf) + debs = packages['deb'] + \ + ['salt-common', 'salt-minion', 'calamari-server', + 'python-rados', 'multipath-tools'] + rpms = packages['rpm'] + \ + ['salt-common', 'salt-minion', 'calamari-server', + 'multipath-tools', 'device-mapper-multipath'] + install_task.remove_packages( + ctx, + conf, + dict( + deb=debs, + rpm=rpms, + ) + ) + install_task.remove_sources(ctx, conf) + + +def remove_ceph_data(ctx): + log.info("Removing any stale ceph data...") + ctx.cluster.run( + args=[ + 'sudo', 'rm', '-rf', '/etc/ceph', + run.Raw('/var/run/ceph*'), + ], + ) + + +def remove_testing_tree(ctx): + log.info('Clearing filesystem of test data...') + ctx.cluster.run( + args=[ + 'sudo', 'rm', '-rf', get_testdir(ctx), + # just for old time's sake + run.Raw('&&'), + 'sudo', 'rm', '-rf', '/tmp/cephtest', + run.Raw('&&'), + 'sudo', 'rm', '-rf', '/home/ubuntu/cephtest', + ], + ) + + +def remove_configuration_files(ctx): + """ + Goes through a list of commonly used configuration files used for testing + that should not be left behind. + + For example, sometimes ceph-deploy may be configured via + ``~/.cephdeploy.conf`` to alter how it handles installation by specifying + a default section in its config with custom locations. + """ + ctx.cluster.run( + args=[ + 'rm', '-f', '/home/ubuntu/.cephdeploy.conf' + ], + timeout=30 + ) + + +def undo_multipath(ctx): + """ + Undo any multipath device mappings created, an + remove the packages/daemon that manages them so they don't + come back unless specifically requested by the test. 
+ """ + log.info('Removing any multipath config/pkgs...') + for remote in ctx.cluster.remotes.keys(): + remote.run( + args=[ + 'sudo', 'multipath', '-F', + ], + check_status=False, + timeout=60 + ) + + +def synch_clocks(remotes): + log.info('Synchronizing clocks...') + for remote in remotes: + remote.run( + args=[ + 'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'), + 'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'), + 'sudo', 'systemctl', 'stop', 'chronyd.service', + run.Raw('&&'), + 'sudo', 'ntpdate-debian', run.Raw('||'), + 'sudo', 'ntp', '-gq', run.Raw('||'), + 'sudo', 'ntpd', '-gq', run.Raw('||'), + 'sudo', 'chronyc', 'sources', + run.Raw('&&'), + 'sudo', 'hwclock', '--systohc', '--utc', + run.Raw('&&'), + 'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'), + 'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'), + 'sudo', 'systemctl', 'start', 'chronyd.service', + run.Raw('||'), + 'true', # ignore errors; we may be racing with ntpd startup + ], + timeout=60, + ) + + +def unlock_firmware_repo(ctx): + log.info('Making sure firmware.git is not locked...') + ctx.cluster.run(args=['sudo', 'rm', '-f', + '/lib/firmware/updates/.git/index.lock', ]) + + +def check_console(hostname): + remote = Remote(hostname) + shortname = remote.shortname + console = remote.console + if not console: + return + cname = '{host}.{domain}'.format( + host=shortname, + domain=console.ipmidomain, + ) + log.info('checking console status of %s' % cname) + if console.check_status(): + log.info('console ready on %s' % cname) + return + if console.check_power('on'): + log.info('attempting to reboot %s' % cname) + console.power_cycle() + else: + log.info('attempting to power on %s' % cname) + console.power_on() + timeout = 100 + log.info('checking console status of %s with timeout %s' % + (cname, timeout)) + if console.check_status(timeout=timeout): + log.info('console ready on %s' % cname) + else: + log.error("Failed to get console status for %s, " % cname) diff --git a/teuthology/openstack/__init__.py b/teuthology/openstack/__init__.py new file mode 100644 index 0000000000..43f568737d --- /dev/null +++ b/teuthology/openstack/__init__.py @@ -0,0 +1,1366 @@ +# +# Copyright (c) 2015 Red Hat, Inc. +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# +import copy +import datetime +import functools +import json +import logging +import operator +import os +import paramiko +import re +import socket +import subprocess +import tempfile +import teuthology +import time +import types +import yaml +import base64 + +from subprocess import CalledProcessError + +from teuthology.contextutil import safe_while +from teuthology.config import config as teuth_config +from teuthology.config import set_config_attr +from teuthology.orchestra import connection +from teuthology import misc + +from yaml.representer import SafeRepresenter + +class cmd_str(str): pass + +def cmd_repr(dumper, data): + scalar = SafeRepresenter.represent_str(dumper, data) + scalar.style ='|' + return scalar + +yaml.add_representer(cmd_str, cmd_repr) + +log = logging.getLogger(__name__) + +class NoFlavorException(Exception): + pass + +def enforce_json_dictionary(something): + if not isinstance(something, dict): + raise Exception( + 'Please pip uninstall --yes cliff-tablib and try again.' + ' Details about this error can be found at' + ' https://bugs.launchpad.net/python-openstackclient/+bug/1510546' + ' you are encouraged to add a comment if you want it to be' + ' fixed.') + +class OpenStackInstance(object): + + def __init__(self, name_or_id, info=None): + self.name_or_id = name_or_id + self.private_or_floating_ip = None + self.private_ip = None + if info is None: + self.set_info() + else: + self.info = {k.lower(): v for k, v in info.items()} + if isinstance(self.info, dict) and self.info.get('status', '') == 'ERROR': + errmsg = 'VM creation failed' + if 'message' in self.info: + errmsg = '{}: {}'.format(errmsg, self.info['message']) + raise Exception(errmsg) + + def set_info(self): + try: + self.info = json.loads( + OpenStack().run("server show -f json " + self.name_or_id)) + enforce_json_dictionary(self.info) + except CalledProcessError: + self.info = None + + def __getitem__(self, name): + return self.info[name.lower()] + + def get_created(self): + now = datetime.datetime.now() + created = datetime.datetime.strptime( + self['created'], '%Y-%m-%dT%H:%M:%SZ') + return (now - created).total_seconds() + + def exists(self): + return self.info is not None + + def get_volumes(self): + """ + Return the uuid of the volumes attached to the name_or_id + OpenStack instance. + """ + volumes = self['os-extended-volumes:volumes_attached'] + return [volume['id'] for volume in volumes ] + + def get_addresses(self): + """ + Return the list of IPs associated with instance_id in OpenStack. 
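+
+ The value is the raw 'addresses' field of 'openstack server show', e.g.
+ (illustrative) 'Ext-Net=158.69.126.100' or 'fsf-lan=10.0.3.15, 93.20.1.8';
+ the loop below retries until at least one numeric address shows up.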
+ """ + with safe_while(sleep=2, tries=30, + action="get ip " + self['id']) as proceed: + while proceed(): + found = re.match('.*\d+', self['addresses']) + if found: + return self['addresses'] + self.set_info() + + def get_ip_neutron(self): + subnets = json.loads(misc.sh("unset OS_AUTH_TYPE OS_TOKEN ; " + "neutron subnet-list -f json -c id -c ip_version")) + subnet_ids = [] + for subnet in subnets: + if subnet['ip_version'] == 4: + subnet_ids.append(subnet['id']) + if not subnet_ids: + raise Exception("no subnet with ip_version == 4") + ports = json.loads(misc.sh("unset OS_AUTH_TYPE OS_TOKEN ; " + "neutron port-list -f json -c fixed_ips -c device_id")) + fixed_ips = None + for port in ports: + if port['device_id'] == self['id']: + fixed_ips = port['fixed_ips'].split("\n") + break + if not fixed_ips: + raise Exception("no fixed ip record found") + ip = None + for fixed_ip in fixed_ips: + record = json.loads(fixed_ip) + if record['subnet_id'] in subnet_ids: + ip = record['ip_address'] + break + if not ip: + raise Exception("no ip") + return ip + + def get_ip(self, network): + """ + Return the private IP of the OpenStack instance_id. + """ + if self.private_ip is None: + try: + self.private_ip = self.get_ip_neutron() + except Exception as e: + log.debug("ignoring get_ip_neutron exception " + str(e)) + self.private_ip = re.findall(network + '=([\d.]+)', + self.get_addresses())[0] + return self.private_ip + + def get_floating_ip(self): + ips = TeuthologyOpenStack.get_os_floating_ips() + for ip in ips: + if ip['Fixed IP Address'] == self.get_ip(''): + return ip['Floating IP Address'] + return None + + def get_floating_ip_or_ip(self): + if not self.private_or_floating_ip: + self.private_or_floating_ip = self.get_floating_ip() + if not self.private_or_floating_ip: + self.private_or_floating_ip = self.get_ip('') + return self.private_or_floating_ip + + def destroy(self): + """ + Delete the name_or_id OpenStack instance. + """ + if not self.exists(): + return True + volumes = self.get_volumes() + OpenStack().run("server set --name REMOVE-ME-" + self.name_or_id + + " " + self['id']) + OpenStack().run("server delete --wait " + self['id'] + + " || true") + for volume in volumes: + OpenStack().volume_delete(volume) + return True + + +class OpenStack(object): + + # http://cdimage.debian.org/cdimage/openstack/current/ + # https://cloud-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64-disk1.img etc. + # http://download.opensuse.org/repositories/Cloud:/Images:/openSUSE_13.2/images/openSUSE-13.2-OpenStack-Guest.x86_64.qcow2 + # http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 etc. + # http://cloud.centos.org/centos/6/images/CentOS-6-x86_64-GenericCloud.qcow2 etc. 
+ # https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2 + # http://fedora.mirrors.ovh.net/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2 + # http://fedora.mirrors.ovh.net/linux/releases/20/Images/x86_64/Fedora-x86_64-20-20131211.1-sda.qcow2 + image2url = { + 'centos-7.2-x86_64': 'http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1511.qcow2', + 'centos-7.3-x86_64': 'http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1701.qcow2', + 'opensuse-42.1-x86_64': 'http://download.opensuse.org/repositories/Cloud:/Images:/Leap_42.1/images/openSUSE-Leap-42.1-OpenStack.x86_64.qcow2', + 'opensuse-42.2-x86_64': 'http://download.opensuse.org/repositories/Cloud:/Images:/Leap_42.2/images/openSUSE-Leap-42.2-OpenStack.x86_64.qcow2', + 'opensuse-42.3-x86_64': 'http://download.opensuse.org/repositories/Cloud:/Images:/Leap_42.3/images/openSUSE-Leap-42.3-OpenStack.x86_64.qcow2', + 'ubuntu-14.04-x86_64': 'https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img', + 'ubuntu-14.04-aarch64': 'https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img', + 'ubuntu-14.04-i686': 'https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-i386-disk1.img', + 'ubuntu-16.04-x86_64': 'https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img', + 'ubuntu-16.04-aarch64': 'https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img', + 'ubuntu-16.04-i686': 'https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-i386-disk1.img', + 'debian-8.0-x86_64': 'http://cdimage.debian.org/cdimage/openstack/current/debian-8.7.1-20170215-openstack-amd64.qcow2', + } + + def __init__(self): + self.provider = None + self.key_filename = None + self.username = 'ubuntu' + self.up_string = "UNKNOWN" + self.teuthology_suite = 'teuthology-suite' + + token = None + token_expires = None + token_cache_duration = 3600 + + def cache_token(self): + if self.provider != 'ovh': + return False + if (OpenStack.token is None and + 'OS_TOKEN_VALUE' in os.environ and + 'OS_TOKEN_EXPIRES' in os.environ): + log.debug("get token from the environment of the parent process") + OpenStack.token = os.environ['OS_TOKEN_VALUE'] + OpenStack.token_expires = int(os.environ['OS_TOKEN_EXPIRES']) + if (OpenStack.token_expires is not None and + OpenStack.token_expires < time.time()): + log.debug("token discarded because it has expired") + OpenStack.token = None + if OpenStack.token is None: + if 'OS_TOKEN_VALUE' in os.environ: + del os.environ['OS_TOKEN_VALUE'] + OpenStack.token = misc.sh("openstack -q token issue -c id -f value").strip() + os.environ['OS_TOKEN_VALUE'] = OpenStack.token + OpenStack.token_expires = int(time.time() + OpenStack.token_cache_duration) + os.environ['OS_TOKEN_EXPIRES'] = str(OpenStack.token_expires) + log.debug("caching OS_TOKEN_VALUE " + "during %s seconds" % OpenStack.token_cache_duration) + return True + + def get_os_url(self, cmd, type=None): + if self.provider != 'ovh': + return "" + url = "" + if (type == 'compute' or + cmd.startswith("server ") or + cmd.startswith("flavor ")): + url = "https://compute.{reg}.cloud.ovh.net/v2/{tenant}" + elif (type == 'network' or + cmd.startswith("ip ") or + cmd.startswith("security ") or + cmd.startswith("network ")): + url = "https://network.compute.{reg}.cloud.ovh.net/" + elif (type == 'image' or + cmd.startswith("image ")): + url = 
"https://image.compute.{reg}.cloud.ovh.net/" + elif (type == 'volume' or + cmd.startswith("volume ")): + url = "https://volume.compute.{reg}.cloud.ovh.net/v2/{tenant}" + if url != "": + url = url.format(reg=os.environ['OS_REGION_NAME'], + tenant=os.environ['OS_TENANT_ID']) + return url + + def run(self, cmd, *args, **kwargs): + url = self.get_os_url(cmd, kwargs.get('type')) + if url != "": + if self.cache_token(): + os.environ['OS_TOKEN'] = os.environ['OS_TOKEN_VALUE'] + os.environ['OS_URL'] = url + if re.match('(server|flavor|ip|security|network|image|volume)', cmd): + cmd = "openstack --quiet " + cmd + try: + status = misc.sh(cmd) + finally: + if 'OS_TOKEN' in os.environ: + del os.environ['OS_TOKEN'] + if 'OS_URL' in os.environ: + del os.environ['OS_URL'] + return status + + def set_provider(self): + if 'OS_AUTH_URL' not in os.environ: + raise Exception('no OS_AUTH_URL environment variable') + providers = (('runabove.io', 'runabove'), + ('cloud.ovh.net', 'ovh'), + ('engcloud.prv.suse.net', 'ecp'), + ('cloudlab.us', 'cloudlab'), + ('entercloudsuite.com', 'entercloudsuite'), + ('rackspacecloud.com', 'rackspace'), + ('dream.io', 'dreamhost')) + self.provider = 'any' + for (pattern, provider) in providers: + if pattern in os.environ['OS_AUTH_URL']: + self.provider = provider + break + return self.provider + + def get_provider(self): + if self.provider is None: + self.set_provider() + return self.provider + + @staticmethod + def get_value(result, field): + """ + Get the value of a field from a result returned by the openstack + command in json format. + + :param result: A dictionary similar to the output of + 'openstack server show' + :param field: The name of the field whose value to retrieve. Case is + ignored. + """ + enforce_json_dictionary(result) + return result[field.lower()] + + def image_exists(self, image): + """ + Return true if the image exists in OpenStack. + """ + found = self.run("image list -f json --limit 2000 --private --property name='" + + self.image_name(image) + "'") + return len(json.loads(found)) > 0 + + def net_id(self, network): + """ + Return the uuid of the network in OpenStack. + """ + r = json.loads(self.run("network show -f json " + + network)) + return self.get_value(r, 'id') + + def type_version_arch(self, os_type, os_version, arch): + """ + Return the string used to differentiate os_type and os_version in names. + """ + return os_type + '-' + os_version + '-' + arch + + def image_name(self, name): + """ + Return the image name used by teuthology in OpenStack to avoid + conflicts with existing names. 
+ """ + return "teuthology-" + name + + def image_create(self, name, arch): + """ + Upload an image into OpenStack + """ + misc.sh("wget -c -O " + name + ".qcow2 " + self.image2url[name]) + if self.get_provider() == 'dreamhost': + image = name + ".raw" + disk_format = 'raw' + misc.sh("qemu-img convert " + name + ".qcow2 " + image) + else: + image = name + ".qcow2" + disk_format = 'qcow2' + if self.get_provider() == 'runabove': + properties = [ + "--property architecture_restrict=" + arch, + "--property architecture=" + arch + ] + elif self.get_provider() == 'cloudlab': + # if not, nova-compute fails on the compute node with + # Error: Cirrus VGA not available + properties = [ + "--property hw_video_model=vga", + ] + else: + properties = [] + + misc.sh("openstack image create --property ownedby=teuthology " + + " ".join(properties) + + " --disk-format=" + disk_format + " --container-format=bare " + + " --private" + + " --file " + image + " " + self.image_name(name)) + + def image(self, os_type, os_version, arch): + """ + Return the image name for the given os_type and os_version. If the image + does not exist it will be created. + """ + name = self.type_version_arch(os_type, os_version, arch) + if not self.image_exists(name): + self.image_create(name, arch) + return self.image_name(name) + + @staticmethod + def sort_flavors(flavors): + def sort_flavor(a, b): + return (a['VCPUs'] - b['VCPUs'] or + a['RAM'] - b['RAM'] or + a['Disk'] - b['Disk']) + return sorted(flavors, cmp=sort_flavor) + + def get_os_flavors(self): + flavors = json.loads(self.run("flavor list -f json")) + return flavors + + def get_sorted_flavors(self, arch, select, flavor_list = None): + log.debug("flavor selection regex: " + select) + flavors = flavor_list or self.get_os_flavors() + found = [] + for flavor in flavors: + if select and not re.match(select, flavor['Name']): + continue + found.append(flavor) + sorted_flavors = OpenStack.sort_flavors(found) + log.debug("sorted flavors = " + str(sorted_flavors)) + return sorted_flavors + + def __flavor(self, hint, flavors): + """ + Return the smallest flavor that satisfies the desired size. + """ + flavors = OpenStack.sort_flavors(flavors) + for flavor in flavors: + if (flavor['RAM'] >= hint['ram'] and + flavor['VCPUs'] >= hint['cpus'] and + flavor['Disk'] >= hint['disk']): + return flavor['Name'] + raise NoFlavorException("openstack flavor list: " + str(flavors) + + " does not contain a flavor in which" + + " the desired " + str(hint) + " can fit") + + def __flavor_range(self, min, good, flavors): + """ + Return the smallest flavor that satisfies the good hint. + If no such flavor, get the largest flavor smaller than good + and larger than min. + """ + flavors = OpenStack.sort_flavors(flavors) + low_range = [] + for flavor in flavors: + if (flavor['RAM'] >= good['ram'] and + flavor['VCPUs'] >= good['cpus'] and + flavor['Disk'] >= good['disk']): + return flavor['Name'] + else: + low_range.append(flavor) + low_range.reverse() + for flavor in low_range: + if (flavor['RAM'] >= min['ram'] and + flavor['VCPUs'] >= min['cpus'] and + flavor['Disk'] >= min['disk']): + return flavor['Name'] + raise NoFlavorException("openstack flavor list: " + str(flavors) + + " does not contain a flavor which" + + " is larger than " + str(min)) + + def __flavor_wrapper(self, min, good, hint, arch): + """ + Wrapper for __flavor_range() and __flavor(), to hide the messiness of + the real world. + + This is the one, single place for coding OpenStack-provider-specific + heuristics for selecting flavors. 
+ """ + select_dict = { + #'ovh': ['^(s1|vps-ssd)-', '^(c2-[0-9]+|(hg|sg)-.*ssd)$', '^(hg|sg|c2)-.*ssd'], + 'ovh': [ + '^s1-', '^c2-[0-9]+$', # new ovh flavors at first + '^vps-ssd-', '^(hg|sg)-.*ssd$' # old ovh flavors + ], + 'ecp': ['^(m1|m2).'], + } + if 'flavor' in teuth_config.openstack: + flavor_select = teuth_config.openstack['flavor'] or [None] + else: + flavor_select = select_dict[self.get_provider()] \ + if self.get_provider() in select_dict else [None] + all_flavors = self.get_os_flavors() + for select in flavor_select: + try: + flavors = self.get_sorted_flavors(arch, select, all_flavors) + if hint: + flavor = self.__flavor(hint, flavors) + else: + flavor = self.__flavor_range(min, good, flavors) + if flavor: + return flavor + except NoFlavorException: + log.debug('No flavor found for select [%s]' % select) + pass + raise NoFlavorException('No flavors found for filters: %s' % flavor_select) + + def flavor(self, hint, arch): + return self.__flavor_wrapper(None, None, hint, arch) + + def flavor_range(self, min, good, arch): + return self.__flavor_wrapper(min, good, None, arch) + + def interpret_hints(self, defaults, hints): + """ + Return a hint hash which is the interpretation of a list of hints + """ + result = copy.deepcopy(defaults) + if not hints: + return result + if type(hints) is types.DictType: + raise TypeError("openstack: " + str(hints) + + " must be an array, not a dict") + for hint in hints: + for resource in ('machine', 'volumes'): + if resource in hint: + new = hint[resource] + current = result[resource] + for key, value in hint[resource].items(): + current[key] = max(current[key], new[key]) + return result + + @staticmethod + def list_instances(): + ownedby = "ownedby='" + teuth_config.openstack['ip'] + "'" + all = json.loads(OpenStack().run( + "server list -f json --long --name 'target'")) + return filter(lambda instance: ownedby in instance['Properties'], all) + + @staticmethod + def list_volumes(): + ownedby = "ownedby='" + teuth_config.openstack['ip'] + "'" + all = json.loads(OpenStack().run("volume list -f json --long")) + def select(volume): + return (ownedby in volume['Properties'] and + volume['Display Name'].startswith('target')) + return filter(select, all) + + def cloud_init_wait(self, instance): + """ + Wait for cloud-init to complete on the name_or_ip OpenStack instance. 
+ """ + ip = instance.get_floating_ip_or_ip() + log.debug('cloud_init_wait ' + ip) + client_args = { + 'user_at_host': '@'.join((self.username, ip)), + 'timeout': 240, + 'retry': False, + } + if self.key_filename: + log.debug("using key " + self.key_filename) + client_args['key_filename'] = self.key_filename + with safe_while(sleep=30, tries=30, + action="cloud_init_wait " + ip) as proceed: + success = False + # CentOS 6.6 logs in /var/log/clout-init-output.log + # CentOS 7.0 logs in /var/log/clout-init.log + tail = ("tail --follow=name --retry" + " /var/log/cloud-init*.log /tmp/init.out") + while proceed(): + try: + client = connection.connect(**client_args) + except paramiko.PasswordRequiredException: + raise Exception( + "The private key requires a passphrase.\n" + "Create a new key with:" + " openstack keypair create myself > myself.pem\n" + " chmod 600 myself.pem\n" + "and call teuthology-openstack with the options\n" + " --key-name myself --key-filename myself.pem\n") + except paramiko.AuthenticationException as e: + log.debug('cloud_init_wait AuthenticationException ' + str(e)) + continue + except socket.timeout as e: + log.debug('cloud_init_wait connect socket.timeout ' + str(e)) + continue + except socket.error as e: + log.debug('cloud_init_wait connect socket.error ' + str(e)) + continue + except Exception as e: + transients = ('Incompatible ssh peer', 'Unknown server') + for transient in transients: + if transient in str(e): + continue + log.exception('cloud_init_wait ' + ip) + raise + log.debug('cloud_init_wait ' + tail) + try: + # get the I/O channel to iterate line by line + transport = client.get_transport() + channel = transport.open_session() + channel.get_pty() + channel.settimeout(240) + output = channel.makefile('r', 1) + channel.exec_command(tail) + for line in iter(output.readline, b''): + log.info(line.strip()) + if self.up_string in line: + success = True + break + except socket.timeout: + client.close() + log.debug('cloud_init_wait socket.timeout ' + tail) + continue + except socket.error as e: + client.close() + log.debug('cloud_init_wait socket.error ' + str(e) + ' ' + tail) + continue + client.close() + if success: + break + return success + + def get_ip(self, instance_id, network): + return OpenStackInstance(instance_id).get_ip(network) + + def get_network(self): + nets = { + 'entercloudsuite' : 'default', + 'cloudlab' : 'flat-lan-1-net', + 'ecp' : 'sesci', + } + if 'network' in teuth_config.openstack: + return teuth_config.openstack['network'] + elif self.get_provider() in nets: + return nets[self.get_provider()] + else: + return None + + def net(self): + """ + Return the network to be used when creating an OpenStack instance. + By default it should not be set. But some providers such as + entercloudsuite require it is. 
+ """ + log.debug('Using config: %s', teuth_config) + network = self.get_network() + return "--nic net-id=" + network if network else "" + + def get_available_archs(self): + if (self.get_provider() == 'cloudlab' or + (self.get_provider() == 'runabove' and + 'HZ1' in os.environ.get('OS_REGION_NAME', ''))): + return ('aarch64',) + else: + return ('x86_64', 'i686') + + def get_default_arch(self): + return self.get_available_archs()[0] + + def volume_delete(self, name_or_id): + self.run("volume set --name REMOVE-ME " + name_or_id + " || true") + self.run("volume delete " + name_or_id + " || true") + + +class TeuthologyOpenStack(OpenStack): + + def __init__(self, args, config, argv): + """ + args is of type argparse.Namespace as returned + when parsing argv and config is the job + configuration. The argv argument can be re-used + to build the arguments list of teuthology-suite. + """ + super(TeuthologyOpenStack, self).__init__() + self.argv = argv + self.args = args + self.config = config + self.up_string = 'teuthology is up and running' + self.user_data = 'teuthology/openstack/openstack-user-data.txt' + + def get_instance(self): + if not hasattr(self, 'instance'): + self.instance = OpenStackInstance(self.server_name()) + return self.instance + + def main(self): + """ + Entry point implementing the teuthology-openstack command. + """ + self.setup_logs() + set_config_attr(self.args) + log.debug('Teuthology config: %s' % self.config.openstack) + key_filenames = (lambda x: x if isinstance(x, list) else [x]) \ + (self.args.key_filename) + for keyfile in key_filenames: + if os.path.isfile(keyfile): + self.key_filename = keyfile + break + if not self.key_filename: + raise Exception('No key file provided, please, use --key-filename option') + self.verify_openstack() + if self.args.teardown: + self.teardown() + return 0 + if self.args.setup: + self.setup() + exit_code = 0 + if self.args.suite: + self.get_instance() + if self.args.wait: + self.reminders() + exit_code = self.run_suite() + self.reminders() + if self.args.teardown: + if self.args.suite and not self.args.wait: + log.error("it does not make sense to teardown a cluster" + " right after a suite is scheduled") + else: + self.teardown() + return exit_code + + def _upload_yaml_file(self, fp): + """ + Given an absolute path fp, assume it is a YAML file existing + on the local machine and upload it to the remote teuthology machine + (see https://github.com/SUSE/teuthology/issues/56 for details) + """ + f = open(fp, 'r') # will throw exception on failure + f.close() + log.info("Detected local YAML file {}".format(fp)) + machine = self.username + "@" + self.instance.get_floating_ip_or_ip() + + sshopts=('-o ConnectTimeout=3 -o UserKnownHostsFile=/dev/null ' + '-o StrictHostKeyChecking=no') + + def ssh_command(s): + return "ssh {o} -i {k} {m} sh -c \\\"{s}\\\"".format( + o=sshopts, + k=self.key_filename, + m=machine, + s=s, + ) + + log.info("Uploading local file {} to teuthology machine".format(fp)) + remote_fp=os.path.normpath( + '/home/{un}/yaml/{fp}'.format( + un=self.username, + fp=fp, + ) + ) + command = ssh_command("stat {aug_fp}".format( + aug_fp=remote_fp, + )) + try: + misc.sh(command) + except: + pass + else: + log.warning( + ('{fp} probably already exists remotely as {aug_fp}; ' + 'the remote one will be clobbered').format( + fp=fp, + aug_fp=remote_fp, + )) + remote_dn=os.path.dirname(remote_fp) + command = ssh_command("mkdir -p {aug_dn}".format( + aug_dn=remote_dn, + )) + misc.sh(command) # will throw exception on failure + command = "scp 
{o} -i {k} {yamlfile} {m}:{dn}".format( + o=sshopts, + k=self.key_filename, + yamlfile=fp, + m=machine, + dn=remote_dn, + ) + misc.sh(command) # will throw exception on failure + return remote_fp + + def _repos_from_file(self, path): + def __check_repo_dict(obj): + if not isinstance(obj, dict): + raise Exception( + 'repo item must be a dict, %s instead' % type(obj)) + required = ['name', 'url'] + if not all(x in obj.keys() for x in required): + raise Exception( + 'repo spec must have at least %s elements' % required) + + def __check_repo_list(obj): + if not isinstance(obj, list): + raise Exception( + 'repo data must be a list, %s instead' % type(obj)) + for i in obj: + __check_repo_dict(i) + + with open(path) as f: + if path.endswith('.yaml') or path.endswith('.yml'): + data = yaml.safe_load(f) + elif path.endswith('.json') or path.endswith('.jsn'): + data = json.load(f) + else: + raise Exception( + 'Cannot detect file type from name {name}. ' + 'Supported: .yaml, .yml, .json, .jsn' + .format(name=f.name)) + __check_repo_list(data) + return data + + def _repo_from_arg(self, value): + (name, url) = value.split(':', 1) + if '!' in name: + n, p = name.split('!', 1) + return {'name': n, 'priority': int(p), 'url': url} + else: + return {'name': name, 'url': url} + + def run_suite(self): + """ + Delegate running teuthology-suite to the OpenStack instance + running the teuthology cluster. + """ + original_argv = self.argv[:] + argv = ['--ceph', self.args.ceph, + '--ceph-repo', self.args.ceph_repo, + '--suite-repo', self.args.suite_repo, + '--suite-branch', self.args.suite_branch, + ] + while len(original_argv) > 0: + if original_argv[0] in ('--name', + '--nameserver', + '--conf', + '--teuthology-branch', + '--teuthology-git-url', + '--test-repo', + '--suite-repo', + '--suite-branch', + '--ceph-repo', + '--ceph', + '--ceph-workbench-branch', + '--ceph-workbench-git-url', + '--archive-upload', + '--archive-upload-url', + '--key-name', + '--key-filename', + '--simultaneous-jobs', + '--controller-cpus', + '--controller-ram', + '--controller-disk'): + del original_argv[0:2] + elif original_argv[0] in ('--teardown', + '--setup', + '--upload', + '--no-canonical-tags'): + del original_argv[0] + elif os.path.isabs(original_argv[0]): + remote_path = self._upload_yaml_file(original_argv[0]) + argv.append(remote_path) + original_argv.pop(0) + else: + argv.append(original_argv.pop(0)) + if self.args.test_repo: + log.info("Using repos: %s" % self.args.test_repo) + repos = functools.reduce(operator.concat, ( + self._repos_from_file(it.lstrip('@')) + if it.startswith('@') else + [self._repo_from_arg(it)] + for it in self.args.test_repo)) + + overrides = { + 'overrides': { + 'install': { + 'repos' : repos + } + } + } + with tempfile.NamedTemporaryFile(mode='w+b', + suffix='-artifact.yaml', + delete=False) as f: + yaml_file = f.name + log.debug("Using file " + yaml_file) + yaml.safe_dump(overrides, stream=f, default_flow_style=False) + + path = self._upload_yaml_file(yaml_file) + argv.append(path) + + # + # If --upload, provide --archive-upload{,-url} regardless of + # what was originally provided on the command line because the + # teuthology-openstack defaults are different from the + # teuthology-suite defaults. 
+ # + if self.args.upload: + argv.extend(['--archive-upload', self.args.archive_upload, + '--archive-upload-url', self.args.archive_upload_url]) + ceph_repo = getattr(self.args, 'ceph_repo') + if ceph_repo: + command = ( + "perl -pi -e 's|.*{opt}.*|{opt}: {value}|'" + " ~/.teuthology.yaml" + ).format(opt='ceph_git_url', value=ceph_repo) + self.ssh(command) + user_home = '/home/' + self.username + openstack_home = user_home + '/teuthology/teuthology/openstack' + if self.args.test_repo: + argv.append(openstack_home + '/openstack-basic.yaml') + else: + argv.append(openstack_home + '/openstack-basic.yaml') + argv.append(openstack_home + '/openstack-buildpackages.yaml') + command = ( + "source ~/.bashrc_teuthology ; " + self.teuthology_suite + " " + + " --machine-type openstack " + + " ".join(map(lambda x: "'" + x + "'", argv)) + ) + return self.ssh(command) + + def reminders(self): + if self.key_filename: + identity = '-i ' + self.key_filename + ' ' + else: + identity = '' + if self.args.upload: + upload = 'upload to : ' + self.args.archive_upload + else: + upload = '' + log.info(""" +pulpito web interface: http://{ip}:8081/ +ssh access : ssh {identity}{username}@{ip} # logs in /usr/share/nginx/html +{upload}""".format(ip=self.instance.get_floating_ip_or_ip(), + username=self.username, + identity=identity, + upload=upload)) + + def setup(self): + instance = self.get_instance() + if not instance.exists(): + if self.get_provider() != 'rackspace': + self.create_security_group() + self.create_cluster() + self.reminders() + + def setup_logs(self): + """ + Setup the log level according to --verbose + """ + loglevel = logging.INFO + if self.args.verbose: + loglevel = logging.DEBUG + logging.getLogger("paramiko.transport").setLevel(logging.DEBUG) + teuthology.log.setLevel(loglevel) + + def ssh(self, command): + """ + Run a command in the OpenStack instance of the teuthology cluster. + Return the stdout / stderr of the command. + """ + ip = self.instance.get_floating_ip_or_ip() + client_args = { + 'user_at_host': '@'.join((self.username, ip)), + 'retry': False, + 'timeout': 240, + } + if self.key_filename: + log.debug("ssh overriding key with " + self.key_filename) + client_args['key_filename'] = self.key_filename + client = connection.connect(**client_args) + # get the I/O channel to iterate line by line + transport = client.get_transport() + channel = transport.open_session() + channel.get_pty() + channel.settimeout(900) + output = channel.makefile('r', 1) + log.debug(":ssh@" + ip + ":" + command) + channel.exec_command(command) + for line in iter(output.readline, b''): + log.info(line.strip()) + return channel.recv_exit_status() + + def verify_openstack(self): + """ + Check there is a working connection to an OpenStack cluster + and set the provider data member if it is among those we + know already. + """ + try: + self.run("flavor list | tail -2") + except subprocess.CalledProcessError: + log.exception("flavor list") + raise Exception("verify openrc.sh has been sourced") + + def teuthology_openstack_flavor(self, arch): + """ + Return an OpenStack flavor fit to run the teuthology cluster. + The RAM size depends on the maximum number of workers that + will run simultaneously. 
+ """ + hint = { + 'disk': 10, # GB + 'ram': 1024, # MB + 'cpus': 1, + } + if self.args.simultaneous_jobs >= 100: + hint['ram'] = 60000 # MB + elif self.args.simultaneous_jobs >= 50: + hint['ram'] = 30000 # MB + elif self.args.simultaneous_jobs >= 25: + hint['ram'] = 15000 # MB + elif self.args.simultaneous_jobs >= 10: + hint['ram'] = 8000 # MB + elif self.args.simultaneous_jobs >= 2: + hint['ram'] = 4000 # MB + if self.args.controller_cpus > 0: + hint['cpus'] = self.args.controller_cpus + if self.args.controller_ram > 0: + hint['ram'] = self.args.controller_ram + if self.args.controller_disk > 0: + hint['disk'] = self.args.controller_disk + + return self.flavor(hint, arch) + + def get_user_data(self): + """ + Create a user-data.txt file to be used to spawn the teuthology + cluster, based on a template where the OpenStack credentials + and a few other values are substituted. + """ + path = tempfile.mktemp() + + with open(os.path.dirname(__file__) + '/bootstrap-teuthology.sh', 'rb') as f: + b64_bootstrap = base64.b64encode(f.read()) + bootstrap_content = str(b64_bootstrap.decode()) + + openrc_sh = '' + cacert_cmd = None + for (var, value) in os.environ.items(): + if var in ('OS_TOKEN_VALUE', 'OS_TOKEN_EXPIRES'): + continue + if var == 'OS_CACERT': + cacert_path = '/home/%s/.openstack.crt' % self.username + cacert_file = value + openrc_sh += 'export %s=%s\n' % (var, cacert_path) + cacert_cmd = ( + "su - -c 'cat > {path}' {user} <> " + "/tmp/init.out 2>&1".format( + url=self.args.teuthology_git_url, + branch=self.args.teuthology_branch, + user=self.username)), + cmd_str( + "su - -c 'cp /tmp/openrc.sh $HOME/openrc.sh' {user}" + .format(user=self.username)), + cmd_str( + "su - -c '(set +x ; source openrc.sh ; set -x ; cd teuthology ; " + "source virtualenv/bin/activate ; " + "teuthology/openstack/setup-openstack.sh {opts})' " + "{user} >> /tmp/init.out " + "2>&1".format(user=self.username, + opts=' '.join(setup_options + all_options))), + # wa: we want to stop paddles and pulpito started by + # setup-openstack before starting teuthology service + "pkill -f 'pecan serve'", + "pkill -f 'python run.py'", + "systemctl enable teuthology", + "systemctl start teuthology", + ] + if cacert_cmd: + cmds.insert(0,cmd_str(cacert_cmd)) + #cloud-config + cloud_config = { + 'bootcmd': [ + 'touch /tmp/init.out', + 'echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf', + ], + 'manage_etc_hosts': True, + 'system_info': { + 'default_user': { + 'name': self.username + } + }, + 'packages': [ + 'python-virtualenv', + 'git', + 'rsync', + ], + 'write_files': [ + { + 'path': '/tmp/bootstrap-teuthology.sh', + 'content': cmd_str(bootstrap_content), + 'encoding': 'b64', + 'permissions': '0755', + }, + { + 'path': '/tmp/openrc.sh', + 'owner': self.username, + 'content': cmd_str(openrc_sh_content), + 'encoding': 'b64', + 'permissions': '0644', + } + ], + 'runcmd': cmds, + 'final_message': 'teuthology is up and running after $UPTIME seconds' + } + user_data = "#cloud-config\n%s" % \ + yaml.dump(cloud_config, default_flow_style = False) + open(path, 'w').write(user_data) + log.debug("user_data: %s" % user_data) + + return path + + def key_pair(self): + return "teuth-%s" % self.args.name + + def server_name(self): + return "teuth-%s" % self.args.name + + def server_group(self): + return "teuth-%s" % self.args.name + + def worker_group(self): + return "teuth-%s-worker" % self.args.name + + def create_security_group(self): + """ + Create a security group that will be used by all teuthology + created instances. 
This should not be necessary in most cases + but some OpenStack providers enforce firewall restrictions even + among instances created within the same tenant. + """ + groups = misc.sh('openstack security group list -c Name -f value').split('\n') + if all(g in groups for g in [self.server_group(), self.worker_group()]): + return + misc.sh(""" +openstack security group delete {server} || true +openstack security group delete {worker} || true +openstack security group create {server} +openstack security group create {worker} +# access to teuthology VM from the outside +openstack security group rule create --proto tcp --dst-port 22 {server} # ssh +openstack security group rule create --proto tcp --dst-port 80 {server} # for log access +openstack security group rule create --proto tcp --dst-port 8080 {server} # pulpito +openstack security group rule create --proto tcp --dst-port 8081 {server} # paddles +# access between teuthology and workers +openstack security group rule create --src-group {worker} --dst-port 1:65535 {server} +openstack security group rule create --protocol udp --src-group {worker} --dst-port 1:65535 {server} +openstack security group rule create --src-group {server} --dst-port 1:65535 {worker} +openstack security group rule create --protocol udp --src-group {server} --dst-port 1:65535 {worker} +# access between members of one group +openstack security group rule create --src-group {worker} --dst-port 1:65535 {worker} +openstack security group rule create --protocol udp --src-group {worker} --dst-port 1:65535 {worker} +openstack security group rule create --src-group {server} --dst-port 1:65535 {server} +openstack security group rule create --protocol udp --src-group {server} --dst-port 1:65535 {server} + """.format(server=self.server_group(), worker=self.worker_group())) + + @staticmethod + def get_unassociated_floating_ip(): + """ + Return a floating IP address not associated with an instance or None. + """ + ips = TeuthologyOpenStack.get_os_floating_ips() + for ip in ips: + if not ip['Port']: + return ip['Floating IP Address'] + return None + + @staticmethod + def create_floating_ip(): + try: + pools = json.loads(OpenStack().run("ip floating pool list -f json")) + except subprocess.CalledProcessError as e: + if 'Floating ip pool operations are only available for Compute v2 network.' \ + in e.output: + log.debug(e.output) + log.debug('Trying newer API than Compute v2') + try: + network = 'floating' + ip = json.loads(misc.sh("openstack --quiet floating ip create -f json '%s'" % network)) + return ip['floating_ip_address'] + except subprocess.CalledProcessError: + log.debug("Can't create floating ip for network '%s'" % network) + + log.debug("create_floating_ip: ip floating pool list failed") + return None + if not pools: + return None + pool = pools[0]['Name'] + try: + ip = json.loads(OpenStack().run( + "ip floating create -f json '" + pool + "'")) + return ip['ip'] + except subprocess.CalledProcessError: + log.debug("create_floating_ip: not creating a floating ip") + return None + + @staticmethod + def associate_floating_ip(name_or_id): + """ + Associate a floating IP to the OpenStack instance + or do nothing if no floating ip can be created. 
+ """ + ip = TeuthologyOpenStack.get_unassociated_floating_ip() + if not ip: + ip = TeuthologyOpenStack.create_floating_ip() + if ip: + OpenStack().run("ip floating add " + ip + " " + name_or_id) + + @staticmethod + def get_os_floating_ips(): + try: + ips = json.loads(OpenStack().run("ip floating list -f json")) + except subprocess.CalledProcessError as e: + log.warning(e) + if e.returncode == 1: + return [] + else: + raise e + return ips + + @staticmethod + def get_floating_ip_id(ip): + """ + Return the id of a floating IP + """ + results = TeuthologyOpenStack.get_os_floating_ips() + for result in results: + for k in ['IP', 'Floating IP Address']: + if k in result: + if result[k] == ip: + return str(result['ID']) + + return None + + def get_instance_id(self): + instance = self.get_instance() + if instance.info: + return instance['id'] + else: + return None + + @staticmethod + def delete_floating_ip(instance_id): + """ + Remove the floating ip from instance_id and delete it. + """ + ip = OpenStackInstance(instance_id).get_floating_ip() + if not ip: + return + OpenStack().run("ip floating remove " + ip + " " + instance_id) + ip_id = TeuthologyOpenStack.get_floating_ip_id(ip) + OpenStack().run("ip floating delete " + ip_id) + + def create_cluster(self): + user_data = self.get_user_data() + security_group = \ + " --security-group {teuthology}".format(teuthology=self.server_group()) + if self.get_provider() == 'rackspace': + security_group = '' + arch = self.get_default_arch() + flavor = self.teuthology_openstack_flavor(arch) + log.debug('Create server: %s' % self.server_name()) + log.debug('Using config: %s' % self.config.openstack) + log.debug('Using flavor: %s' % flavor) + key_name = self.args.key_name + if not key_name: + raise Exception('No key name provided, use --key-name option') + log.debug('Using key name: %s' % self.args.key_name) + self.run( + "server create " + + " --image '" + self.image('ubuntu', '16.04', arch) + "' " + + " --flavor '" + flavor + "' " + + " " + self.net() + + " --key-name " + key_name + + " --user-data " + user_data + + security_group + + " --wait " + self.server_name() + + " -f json") + os.unlink(user_data) + self.instance = OpenStackInstance(self.server_name()) + self.associate_floating_ip(self.instance['id']) + return self.cloud_init_wait(self.instance) + + def packages_repository(self): + return 'teuth-%s-repo' % self.args.name #packages-repository + + def teardown(self): + """ + Delete all instances run by the teuthology cluster and delete the + instance running the teuthology cluster. 
+ """ + instance_id = self.get_instance_id() + + if instance_id: + self.ssh("sudo /etc/init.d/teuthology stop || true") + self.delete_floating_ip(instance_id) + self.run("server delete %s || true" % self.packages_repository()) + self.run("server delete --wait %s || true" % self.server_name()) + self.run("keypair delete %s || true" % self.key_pair()) + self.run("security group delete %s || true" % self.worker_group()) + self.run("security group delete %s || true" % self.server_group()) + +def main(ctx, argv): + return TeuthologyOpenStack(ctx, teuth_config, argv).main() diff --git a/teuthology/openstack/archive-key b/teuthology/openstack/archive-key new file mode 100644 index 0000000000..a8861441db --- /dev/null +++ b/teuthology/openstack/archive-key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAvLz+sao32JL/yMgwTFDTnQVZK3jyXlhQJpHLsgwgHWHQ/27L +fwEbGFVYsJNBGntZwCZvH/K4c0IevbnX/Y69qgmAc9ZpZQLIcIF0A8hmwVYRU+Ap +TAK2qAvadThWfiRBA6+SGoRy6VV5MWeq+hqlGf9axRKqhECNhHuGBuBeosUOZOOH +NVzvFIbp/4842yYrZUDnDzW7JX2kYGi6kaEAYeR8qYJgT/95Pm4Bgu1V7MI36rx1 +O/5BSPF3LvDSnnaZyHCDZtwzC50lBnS2nx8kKPmmdKBSEJoTdNRPIXZ/lMq5pzIW +QPDjI8O5pbX1BJcxfFlZ/h+bI6u8IX3vfTGHWwIDAQABAoIBAG5yLp0rHfkXtKT7 +OQA/wEW/znmZEkPRbD3VzZyIafanuhTv8heFPyTTNM5Hra5ghpniI99PO07/X1vp +OBMCB81MOCYRT6WzpjXoG0rnZ/I1enhZ0fDQGbFnFlTIPh0c/Aq7IEVyQoh24y/d +GXm4Q+tdufFfRfeUivv/CORXQin/Iugbklj8erjx+fdVKPUXilmDIEVleUncer5/ +K5Fxy0lWbm6ZX1fE+rfJvCwNjAaIJgrN8TWUTE8G72F9Y0YU9hRtqOZe6MMbSufy +5+/yj2Vgp+B8Id7Ass2ylDQKsjBett/M2bNKt/DUVIiaxKi0usNSerLvtbkWEw9s +tgUI6ukCgYEA6qqnZwkbgV0lpj1MrQ3BRnFxNR42z2MyEY5xRGaYp22ByxS207z8 +mM3EuLH8k2u6jzsGoPpBWhBbs97MuGDHwsMEO5rBpytnTE4Hxrgec/13Arzk4Bme +eqg1Ji+lNkoLzEHkuihskcZwnQ8uaOdqrnH/NRGuUhA9hjeh+lQzBy8CgYEAzeV1 +zYsw8xIBFtbmFhBQ8imHr0SQalTiQU2Qn46LORK0worsf4sZV5ZF3VBRdnCUwwbm +0XaMb3kE2UBlU8qPqLgxXPNjcEKuqtVlp76dT/lrXIhYUq+Famrf20Lm01kC5itz +QF247hnUfo2uzxpatuEr2ggs2NjuODn57tVw95UCgYEAv0s+C5AxC9OSzWFLEAcW +dwYi8toedBC4z/b9/nRkHJf4JkRMhW6ZuzaCFs2Ax+wZuIi1bqSSgYi0OHx3BhZe +wTWYTb5p/owzONCjJisRKByG14SETuqTdgmIyggs9YSG+Yr9mYM6fdr2EhI+EuYS +4QGsuOYg5GS4wqC3OglJT6ECgYA8y28QRPQsIXnO259OjnzINDkLKGyX6P5xl8yH +QFidfod/FfQk6NaPxSBV67xSA4X5XBVVbfKji5FB8MC6kAoBIHn63ybSY+4dJSuB +70eV8KihxuSFbawwMuRsYoGzkAnKGrRKIiJTs67Ju14NatO0QiJnm5haYxtb4MqK +md1kTQKBgDmTxtSBVOV8eMhl076OoOvdnpb3sy/obI/XUvurS0CaAcqmkVSNJ6c+ +g1O041ocTbuW5d3fbzo9Jyle6qsvUQd7fuoUfAMrd0inKsuYPPM0IZOExbt8QqLI +KFJ+r/nQYoJkmiNO8PssxcP3CMFB6TpUx0BgFcrhH//TtKKNrGTl +-----END RSA PRIVATE KEY----- diff --git a/teuthology/openstack/archive-key.pub b/teuthology/openstack/archive-key.pub new file mode 100644 index 0000000000..57513806d4 --- /dev/null +++ b/teuthology/openstack/archive-key.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8vP6xqjfYkv/IyDBMUNOdBVkrePJeWFAmkcuyDCAdYdD/bst/ARsYVViwk0Eae1nAJm8f8rhzQh69udf9jr2qCYBz1mllAshwgXQDyGbBVhFT4ClMAraoC9p1OFZ+JEEDr5IahHLpVXkxZ6r6GqUZ/1rFEqqEQI2Ee4YG4F6ixQ5k44c1XO8Uhun/jzjbJitlQOcPNbslfaRgaLqRoQBh5HypgmBP/3k+bgGC7VXswjfqvHU7/kFI8Xcu8NKedpnIcINm3DMLnSUGdLafHyQo+aZ0oFIQmhN01E8hdn+UyrmnMhZA8OMjw7mltfUElzF8WVn+H5sjq7whfe99MYdb loic@fold diff --git a/teuthology/openstack/bootstrap-teuthology.sh b/teuthology/openstack/bootstrap-teuthology.sh new file mode 100644 index 0000000000..df433315ed --- /dev/null +++ b/teuthology/openstack/bootstrap-teuthology.sh @@ -0,0 +1,33 @@ +#!/bin/bash -ex +TEUTH_PATH=${1:-"teuthology"} +TEUTH_GIT=${2:-"https://github.com/ceph/teuthology"} +TEUTH_BRANCH=${3:-"main"} + +mkdir -p $TEUTH_PATH +git init $TEUTH_PATH + +pushd $TEUTH_PATH + +echo Fetch upstream changes from 
$TEUTH_GIT +git fetch --tags --progress $TEUTH_GIT +refs/heads/*:refs/remotes/origin/* +git config remote.origin.url $TEUTH_GIT +git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/* +git config remote.origin.url $TEUTH_GIT + +# Check if branch has form origin/pr/*/merge +isPR="^origin\/pr\/" +if [[ "$TEUTH_BRANCH" =~ $isPR ]] ; then + +git fetch --tags --progress https://github.com/suse/teuthology +refs/pull/*:refs/remotes/origin/pr/* +rev=$(git rev-parse refs/remotes/$TEUTH_BRANCH^{commit}) + +git config core.sparsecheckout +git checkout -f $rev +else +git checkout $TEUTH_BRANCH +fi + +./bootstrap install + +popd + diff --git a/teuthology/openstack/openstack-basic.yaml b/teuthology/openstack/openstack-basic.yaml new file mode 100644 index 0000000000..db443f4df6 --- /dev/null +++ b/teuthology/openstack/openstack-basic.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + global: + osd heartbeat grace: 100 + # this line to address issue #1017 + mon lease: 15 + mon lease ack timeout: 25 + s3tests: + idle_timeout: 1200 + ceph-fuse: + client.0: + mount_wait: 60 + mount_timeout: 120 +archive-on-error: true diff --git a/teuthology/openstack/openstack-buildpackages.yaml b/teuthology/openstack/openstack-buildpackages.yaml new file mode 100644 index 0000000000..1e404b48c6 --- /dev/null +++ b/teuthology/openstack/openstack-buildpackages.yaml @@ -0,0 +1,10 @@ +tasks: + - buildpackages: + good_machine: + disk: 100 # GB + ram: 15000 # MB + cpus: 16 + min_machine: + disk: 100 # GB + ram: 8000 # MB + cpus: 1 diff --git a/teuthology/openstack/openstack-centos-6.5-user-data.txt b/teuthology/openstack/openstack-centos-6.5-user-data.txt new file mode 100644 index 0000000000..27e705df05 --- /dev/null +++ b/teuthology/openstack/openstack-centos-6.5-user-data.txt @@ -0,0 +1,24 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-* + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/6/x86_64/ && yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 && rm /etc/yum.repos.d/dl.fedoraproject.org* + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp + - dracut-modules-growroot +runcmd: + - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64 + - reboot +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-centos-7.0-user-data.txt b/teuthology/openstack/openstack-centos-7.0-user-data.txt new file mode 100644 index 0000000000..475aaaa377 --- /dev/null +++ b/teuthology/openstack/openstack-centos-7.0-user-data.txt @@ -0,0 +1,21 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-* + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp + - redhat-lsb-core +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-centos-7.1-user-data.txt b/teuthology/openstack/openstack-centos-7.1-user-data.txt new file mode 100644 index 0000000000..475aaaa377 --- /dev/null +++ b/teuthology/openstack/openstack-centos-7.1-user-data.txt @@ -0,0 +1,21 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-* + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp + - redhat-lsb-core +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-centos-7.2-user-data.txt b/teuthology/openstack/openstack-centos-7.2-user-data.txt new file mode 100644 index 0000000000..475aaaa377 --- /dev/null +++ b/teuthology/openstack/openstack-centos-7.2-user-data.txt @@ -0,0 +1,21 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-* + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp + - redhat-lsb-core +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-centos-7.3-user-data.txt b/teuthology/openstack/openstack-centos-7.3-user-data.txt new file mode 120000 index 0000000000..123a8b40a9 --- /dev/null +++ b/teuthology/openstack/openstack-centos-7.3-user-data.txt @@ -0,0 +1 @@ +openstack-centos-7.2-user-data.txt \ No newline at end of file diff --git a/teuthology/openstack/openstack-debian-7.0-user-data.txt b/teuthology/openstack/openstack-debian-7.0-user-data.txt new file mode 120000 index 0000000000..1c0d256759 --- /dev/null +++ b/teuthology/openstack/openstack-debian-7.0-user-data.txt @@ -0,0 +1 @@ +openstack-ubuntu-14.04-user-data.txt \ No newline at end of file diff --git a/teuthology/openstack/openstack-debian-8.0-user-data.txt b/teuthology/openstack/openstack-debian-8.0-user-data.txt new file mode 100644 index 0000000000..61180663cd --- /dev/null +++ b/teuthology/openstack/openstack-debian-8.0-user-data.txt @@ -0,0 +1,24 @@ +#cloud-config +bootcmd: + - apt-get remove --purge -y resolvconf || true + - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf + - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf + - ifdown -a ; ifup -a + - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - echo "MaxSessions 1000" >> /etc/ssh/sshd_config +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp +runcmd: +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo + - echo '{username} ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-opensuse-15.0-user-data.txt b/teuthology/openstack/openstack-opensuse-15.0-user-data.txt new file mode 100644 index 0000000000..7cbbc852ff --- /dev/null +++ b/teuthology/openstack/openstack-opensuse-15.0-user-data.txt @@ -0,0 +1,26 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --gpg-auto-import-keys refresh + - zypper --non-interactive remove --force librados2 librbd1 multipath-tools-rbd qemu-block-rbd ntp + - zypper --non-interactive install --no-recommends --force wget git-core rsyslog lsb-release make gcc gcc-c++ salt-master salt-minion salt-api chrony + - systemctl enable chronyd.service + - systemctl start chronyd.service + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . 
' '))/" /etc/salt/minion + - sleep 30 +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-opensuse-15.1-user-data.txt b/teuthology/openstack/openstack-opensuse-15.1-user-data.txt new file mode 120000 index 0000000000..17a81c070d --- /dev/null +++ b/teuthology/openstack/openstack-opensuse-15.1-user-data.txt @@ -0,0 +1 @@ +openstack-opensuse-15.0-user-data.txt \ No newline at end of file diff --git a/teuthology/openstack/openstack-opensuse-42.1-user-data.txt b/teuthology/openstack/openstack-opensuse-42.1-user-data.txt new file mode 100644 index 0000000000..1860ef1400 --- /dev/null +++ b/teuthology/openstack/openstack-opensuse-42.1-user-data.txt @@ -0,0 +1,27 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive remove systemd-logger + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog + lsb-release salt-minion salt-master make + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion + - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl enable salt-minion.service ntpd.service + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-opensuse-42.2-user-data.txt b/teuthology/openstack/openstack-opensuse-42.2-user-data.txt new file mode 100644 index 0000000000..c8ca72c626 --- /dev/null +++ b/teuthology/openstack/openstack-opensuse-42.2-user-data.txt @@ -0,0 +1,28 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - 'zypper rr openSUSE-Leap-Cloud-Tools || :' + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive remove systemd-logger + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog + lsb-release salt-minion salt-master make gcc gcc-c++ + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion + - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl enable salt-minion.service ntpd.service + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-opensuse-42.3-user-data.txt b/teuthology/openstack/openstack-opensuse-42.3-user-data.txt new file mode 100644 index 0000000000..ee7d4fd7b4 --- /dev/null +++ b/teuthology/openstack/openstack-opensuse-42.3-user-data.txt @@ -0,0 +1,27 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - 'zypper rr openSUSE-Leap-Cloud-Tools || :' + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive remove systemd-logger + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release make gcc gcc-c++ + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion + - ( if ! 
grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl enable ntpd.service + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-sle-12.1-user-data.txt b/teuthology/openstack/openstack-sle-12.1-user-data.txt new file mode 100644 index 0000000000..820cd9c26b --- /dev/null +++ b/teuthology/openstack/openstack-sle-12.1-user-data.txt @@ -0,0 +1,25 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo + - SuSEfirewall2 stop +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog + lsb-release make + - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-sle-12.2-user-data.txt b/teuthology/openstack/openstack-sle-12.2-user-data.txt new file mode 100644 index 0000000000..6977f381e0 --- /dev/null +++ b/teuthology/openstack/openstack-sle-12.2-user-data.txt @@ -0,0 +1,27 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo + - SuSEfirewall2 stop +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog + lsb-release salt-minion salt-master make gcc gcc-c++ + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion + - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl enable salt-minion.service ntpd.service + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-sle-12.3-user-data.txt b/teuthology/openstack/openstack-sle-12.3-user-data.txt new file mode 100644 index 0000000000..fa1d2267c5 --- /dev/null +++ b/teuthology/openstack/openstack-sle-12.3-user-data.txt @@ -0,0 +1,24 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo + - SuSEfirewall2 stop +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion + - ( if ! 
grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi ) + - systemctl enable salt-minion.service ntpd.service + - systemctl restart ntpd.service +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-sle-15.0-user-data.txt b/teuthology/openstack/openstack-sle-15.0-user-data.txt new file mode 100644 index 0000000000..0fb9008797 --- /dev/null +++ b/teuthology/openstack/openstack-sle-15.0-user-data.txt @@ -0,0 +1,25 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0 + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony + - sed -i -e 's/^! pool/pool/' /etc/chrony.conf + - systemctl enable chronyd.service + - systemctl start chronyd.service + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-sle-15.1-user-data.txt b/teuthology/openstack/openstack-sle-15.1-user-data.txt new file mode 100644 index 0000000000..2bdd6eea11 --- /dev/null +++ b/teuthology/openstack/openstack-sle-15.1-user-data.txt @@ -0,0 +1,37 @@ +#cloud-config +bootcmd: + - echo nameserver {nameserver} | tee /etc/resolv.conf + - echo search {lab_domain} | tee -a /etc/resolv.conf + - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config +# See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +users: + - name: {username} + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - | + for i in $(seq 1 30) ; do + ping -q -c 1 8.8.8.8 && break + sleep 10 + done + ETH=$(ip route list | grep "scope link" | cut -f 3 -d ' ') + sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-$ETH + ( + curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | + sed -e 's/[\.-].*//' + eval printf "%03d%03d%03d%03d.{lab_domain}" $( + curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | + tr . 
' ' ) + ) | tee /etc/hostname + hostname $(cat /etc/hostname) + - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks refresh + - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony + - sed -i -e 's/^! pool/pool/' /etc/chrony.conf + - systemctl enable chronyd.service + - systemctl start chronyd.service + - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-teuthology.cron b/teuthology/openstack/openstack-teuthology.cron new file mode 100644 index 0000000000..ee919bf7e9 --- /dev/null +++ b/teuthology/openstack/openstack-teuthology.cron @@ -0,0 +1,2 @@ +SHELL=/bin/bash +*/30 * * * * ( date ; source $HOME/openrc.sh ; time timeout 900 $HOME/teuthology/virtualenv/bin/teuthology-nuke --stale-openstack ) >> $HOME/cron.log 2>&1 diff --git a/teuthology/openstack/openstack-teuthology.init b/teuthology/openstack/openstack-teuthology.init new file mode 100755 index 0000000000..87bf35be44 --- /dev/null +++ b/teuthology/openstack/openstack-teuthology.init @@ -0,0 +1,225 @@ +#!/bin/bash +# +# Copyright (c) 2015 Red Hat, Inc. +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +### BEGIN INIT INFO +# Provides: teuthology +# Required-Start: $network $remote_fs $syslog beanstalkd nginx +# Required-Stop: $network $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: +# Short-Description: Start teuthology +### END INIT INFO + +cd /home/ubuntu + +source /etc/default/teuthology + +user=${TEUTHOLOGY_USERNAME:-ubuntu} + +export HOME=/home/$user + +function worker_pidfile() { + echo /var/run/teuthology-worker.$1.pid +} +function worker_logfile() { + echo /var/log/teuthology.${1}.log +} + +function stop_worker() { + wnum=$1 + wpidfile=$(worker_pidfile $wnum) + if [[ -f $wpidfile ]] ; then + wpid=$(cat $wpidfile) + echo Killing worker $wnum with pid=$wpid... 
+ pkill -P $wpid + pkill $wpid + rm -f $wpidfile + fi +} + +function stop_workers() { + for i in $(seq 1 $NWORKERS) ; do + stop_worker $i + done +} + +function start_worker() { + local wlogfile=$1 + local wpidfile=$2 + mkdir -p /tmp/log && chown $user /tmp/log + su - -c " +cd /home/$user +source openrc.sh +cd teuthology +export LC_ALL=C +virtualenv/bin/teuthology-worker --tube openstack -l /tmp/log --archive-dir /usr/share/nginx/html +" $user > $wlogfile 2>&1 & { + echo $! > $wpidfile + echo "Started worker with pid=$! see log $wlogfile" + } +} + +function rkill() { + local pid=$1 + for i in $(pgrep -P $pid) ; do + rkill $i + done + echo Killing process $pid + kill -9 $pid +} +function status_process() { + local name=$1 + local pidf=$2 + [[ -f $pidf ]] && { + PID=$(cat $pidf) + STATUS=$(ps aux --no-headers -q $PID 2>&1 > /dev/null && echo running || echo dead) + echo $name PID:$PID STATUS:$STATUS + } +} + +function stop_process() { + local pidfile=$1 + [[ -f $pidfile ]] && { + local pid=$(cat $pidfile) + rkill $pid + ps aux --no-headers -q $pid 2>&1 > /dev/null || rm $pidfile + } +} + +function start_workers() { + for i in $(seq 1 $NWORKERS) ; do + local wpidfile=$(worker_pidfile $i) + local wlogfile=$(worker_logfile $i) + [[ -f $wpidfile ]] && { + local wpid=$(cat $wpidfile) + ps aux --no-headers -q $wpid 2>&1 > /dev/null && { + echo Worker $i is already running with process $wpid + continue + } + } + start_worker $wlogfile $wpidfile + done +} + +case $1 in + start-workers) + start_workers + ;; + status-workers|list-workers) + for i in $(ls /var/run | grep teuthology-worker | sort) ; do + WORKER=${i##teuthology-worker.} + WORKER=${WORKER%%.pid} + status_process "worker $WORKER" /var/run/$i + done + ;; + stop-workers) + echo Stopping workers + stop_workers + ;; + stop-worker) + stop_worker $2 + ;; + restart-workers) + $0 stop-workers + $1 start-workers + ;; + status-pulpito) + status_process pulpito /var/run/pulpito.pid + ;; + start-pulpito) + su - -c "cd /home/$user/pulpito ; virtualenv/bin/python run.py" $user > /var/log/pulpito.log 2>&1 & \ + echo $! > /var/run/pulpito.pid + ;; + stop-pulpito) + echo Stopping pulpito + stop_process /var/run/pulpito.pid + ;; + status-paddles) + status_process paddles /var/run/paddles.pid + ;; + start-paddles) + su - -c "cd /home/$user/paddles ; virtualenv/bin/pecan serve config.py" $user > /var/log/paddles.log 2>&1 & + echo $! > /var/run/paddles.pid + ;; + stop-paddles) + echo Stopping paddles + stop_process /var/run/paddles.pid + ;; + start) + /etc/init.d/beanstalkd start + $0 start-paddles + $0 start-pulpito + sleep 3 + ( + cd /home/$user + source openrc.sh + cd teuthology + . virtualenv/bin/activate + teuthology-lock --list-targets --owner scheduled_$user@teuthology > /tmp/t + if test -s /tmp/t && ! 
grep -qq 'targets: {}' /tmp/t ; then + teuthology-lock --unlock -t /tmp/t --owner scheduled_$user@teuthology + fi + start_workers + ) + ;; + stop) + #pkill -f 'pecan serve' + #pkill -f 'python run.py' + #pkill -f 'teuthology-worker' + $0 stop-pulpito + $0 stop-paddles + $0 stop-workers + pkill -f 'ansible' + /etc/init.d/beanstalkd stop + source /home/$user/teuthology/virtualenv/bin/activate + source /home/$user/openrc.sh + for dev in eth0 ens3 ; do + ip=$(ip a show dev $dev 2>/dev/null | sed -n "s:.*inet \(.*\)/.*:\1:p") + test "$ip" && break + done + openstack server list --long -f json --name target | \ + jq ".[] | select(.Properties | contains(\"ownedby='$ip'\")) | .ID" | \ + xargs --no-run-if-empty --max-args 1 -P20 openstack server delete --wait + openstack server list --long -f json --name ceph- | \ + jq ".[] | select(.Properties | contains(\"ownedby='$ip'\")) | .ID" | \ + xargs --no-run-if-empty --max-args 1 -P20 openstack server delete --wait + openstack volume list --long -f json | \ + jq ".[] | select(.Properties | contains(\"ownedby='$ip'\")) | .ID" | \ + xargs --no-run-if-empty --max-args 1 -P20 openstack volume delete + perl -pi -e 's/.*gitbuilder_host.*/gitbuilder_host: gitbuilder.ceph.com/' /home/$user/.teuthology.yaml + rm -fr /home/$user/src/* + mv /tmp/stampsdir /tmp/stampsdir.old + mkdir /tmp/stampsdir + chown $user /tmp/stampsdir + if test -f /tmp/stampsdir.old/packages-repository ; then + mv /tmp/stampsdir.old/*packages-repository* /tmp/stampsdir + fi + rm -fr /tmp/stampsdir.old + ;; + restart) + $0 stop + $0 start + ;; + *) +esac diff --git a/teuthology/openstack/openstack-ubuntu-12.04-user-data.txt b/teuthology/openstack/openstack-ubuntu-12.04-user-data.txt new file mode 100644 index 0000000000..0b104f5fdd --- /dev/null +++ b/teuthology/openstack/openstack-ubuntu-12.04-user-data.txt @@ -0,0 +1,23 @@ +#cloud-config +bootcmd: + - apt-get remove --purge -y resolvconf || true + - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf + - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf + - ifdown -a ; ifup -a + - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf + - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - echo "MaxSessions 1000" >> /etc/ssh/sshd_config +preserve_hostname: true +manage_etc_hosts: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp +runcmd: + - dpkg -l python wget git ntp >> /var/log/cloud-init-output.log + - echo "{up}" >> /var/log/cloud-init-output.log diff --git a/teuthology/openstack/openstack-ubuntu-14.04-user-data.txt b/teuthology/openstack/openstack-ubuntu-14.04-user-data.txt new file mode 100644 index 0000000000..5a6ea6a3d3 --- /dev/null +++ b/teuthology/openstack/openstack-ubuntu-14.04-user-data.txt @@ -0,0 +1,21 @@ +#cloud-config +bootcmd: + - apt-get remove --purge -y resolvconf || true + - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf + - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf + - ifdown -a ; ifup -a + - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf + - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - echo "MaxSessions 1000" >> /etc/ssh/sshd_config +manage_etc_hosts: true +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-ubuntu-16.04-user-data.txt b/teuthology/openstack/openstack-ubuntu-16.04-user-data.txt new file mode 100644 index 0000000000..5a6ea6a3d3 --- /dev/null +++ b/teuthology/openstack/openstack-ubuntu-16.04-user-data.txt @@ -0,0 +1,21 @@ +#cloud-config +bootcmd: + - apt-get remove --purge -y resolvconf || true + - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf + - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf + - ifdown -a ; ifup -a + - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf + - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . 
' ' ) ) | tee /etc/hostname + - hostname $(cat /etc/hostname) + - echo "MaxSessions 1000" >> /etc/ssh/sshd_config +manage_etc_hosts: true +preserve_hostname: true +system_info: + default_user: + name: {username} +packages: + - python + - wget + - git + - ntp +final_message: "{up}, after $UPTIME seconds" diff --git a/teuthology/openstack/openstack-user-data.txt b/teuthology/openstack/openstack-user-data.txt new file mode 100644 index 0000000000..8b2ba9b850 --- /dev/null +++ b/teuthology/openstack/openstack-user-data.txt @@ -0,0 +1,22 @@ +#cloud-config +bootcmd: + - touch /tmp/init.out + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +system_info: + default_user: + name: TEUTHOLOGY_USERNAME +packages: + - python-virtualenv + - git + - rsync +runcmd: + - su - -c '(set -x ; CLONE_OPENSTACK && cd teuthology && ./bootstrap install)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1 + - echo 'export OPENRC' | tee /home/TEUTHOLOGY_USERNAME/openrc.sh + - su - -c '(set -x ; source openrc.sh ; cd teuthology ; source virtualenv/bin/activate ; teuthology/openstack/setup-openstack.sh --nworkers NWORKERS UPLOAD CEPH_WORKBENCH CANONICAL_TAGS SETUP_OPTIONS)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1 + # wa: we want to stop paddles and pulpito started by setup-openstack, before start teuthology service + - pkill -f 'pecan serve' + - pkill -f 'python run.py' + - systemctl enable teuthology + - systemctl start teuthology +final_message: "teuthology is up and running after $UPTIME seconds" diff --git a/teuthology/openstack/setup-openstack.sh b/teuthology/openstack/setup-openstack.sh new file mode 100755 index 0000000000..b6d9aa1b09 --- /dev/null +++ b/teuthology/openstack/setup-openstack.sh @@ -0,0 +1,793 @@ +#!/bin/bash +# +# Copyright (c) 2015 Red Hat, Inc. +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +# +# Most of this file is intended to be obsoleted by the ansible equivalent +# when they are available (setting up paddles, pulpito, etc.). 
+# +function create_config() { + local network="$1" + local subnet="$2" + local nameserver="$3" + local labdomain="$4" + local ip="$5" + local archive_upload="$6" + local canonical_tags="$7" + local selfname="$8" + local keypair="$9" + local server_name="${10}" + local server_group="${11}" + local worker_group="${12}" + local package_repo="${13}" + + if test "$network" ; then + network="network: $network" + fi + + if test "$archive_upload" ; then + archive_upload="archive_upload: $archive_upload" + fi + + cat > ~/.teuthology.yaml < ~/.vault_pass.txt + echo "OVERRIDE ~/.vault_pass.txt" + return 0 +} + +function apt_get_update() { + sudo apt-get update +} + +function setup_docker() { + if test -f /etc/apt/sources.list.d/docker.list ; then + echo "OK docker is installed" + else + sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D + echo deb https://apt.dockerproject.org/repo ubuntu-trusty main | sudo tee -a /etc/apt/sources.list.d/docker.list + sudo apt-get -qq install -y docker-engine + echo "INSTALLED docker" + fi +} + +function setup_fail2ban() { + if test -f /usr/bin/fail2ban-server; then + echo "OK fail2ban is installed" + else + sudo apt-get -qq install -y fail2ban + echo "INSTALLED fail2ban" + fi + sudo systemctl restart fail2ban + sudo systemctl enable fail2ban + echo "STARTED fail2ban" +} + +function setup_salt_master() { + if test -f /etc/salt/master ; then + echo "OK salt-master is installed" + else + sudo apt-get -qq install -y salt-master + fi +} + +function teardown_paddles() { + if pkill -f 'pecan' ; then + echo "SHUTDOWN the paddles server" + fi +} + +function setup_paddles() { + local ip=$1 + + local public_ip=$(curl --silent http://169.254.169.254/2009-04-04/meta-data/public-ipv4/) + if test -z "$public_ip" ; then + public_ip=$ip + fi + + local paddles_dir=$(dirname $0)/../../../paddles + + if ! test -d $paddles_dir ; then + git clone https://github.com/ceph/paddles.git $paddles_dir || return 1 + fi + + sudo apt-get -qq install -y --force-yes beanstalkd postgresql postgresql-contrib postgresql-server-dev-all supervisor + + if ! sudo /etc/init.d/postgresql status ; then + sudo mkdir -p /etc/postgresql + sudo chown postgres /etc/postgresql + sudo -u postgres pg_createcluster 9.3 paddles + sudo /etc/init.d/postgresql start || return 1 + fi + if ! 
psql --command 'select 1' 'postgresql://paddles:paddles@localhost/paddles' > /dev/null 2>&1 ; then + sudo -u postgres psql -c "CREATE USER paddles with PASSWORD 'paddles';" || return 1 + sudo -u postgres createdb -O paddles paddles || return 1 + fi + ( + cd $paddles_dir || return 1 + git pull --rebase + git clean -ffqdx + sed -e "s|^address.*|address = 'http://localhost'|" \ + -e "s|^job_log_href_templ = 'http://qa-proxy.ceph.com/teuthology|job_log_href_templ = 'http://$public_ip|" \ + -e "/sqlite/d" \ + -e "s|.*'postgresql+psycop.*'|'url': 'postgresql://paddles:paddles@localhost/paddles'|" \ + -e "s/'host': '127.0.0.1'/'host': '0.0.0.0'/" \ + < config.py.in > config.py + virtualenv ./virtualenv + source ./virtualenv/bin/activate + pip install -r requirements.txt + pip install sqlalchemy tzlocal requests netaddr + python setup.py develop + ) + + echo "CONFIGURED the paddles server" +} + +function populate_paddles() { + local subnets="$1" + local labdomain=$2 + + local paddles_dir=$(dirname $0)/../../../paddles + + local url='postgresql://paddles:paddles@localhost/paddles' + + pkill -f 'pecan serve' + + sudo -u postgres dropdb paddles + sudo -u postgres createdb -O paddles paddles + + ( + cd $paddles_dir || return 1 + source virtualenv/bin/activate + pecan populate config.py + + ( + echo "begin transaction;" + for subnet in $subnets ; do + subnet_names_and_ips $subnet | while read name ip ; do + echo "insert into nodes (name,machine_type,is_vm,locked,up) values ('${name}.${labdomain}', 'openstack', TRUE, FALSE, TRUE);" + done + done + echo "commit transaction;" + ) | psql --quiet $url + + setsid pecan serve config.py < /dev/null > /dev/null 2>&1 & + for i in $(seq 1 20) ; do + if curl --silent http://localhost:8080/ > /dev/null 2>&1 ; then + break + else + echo -n . + sleep 5 + fi + done + echo -n ' ' + ) + + echo "RESET the paddles server" +} + +function teardown_pulpito() { + if pkill -f 'python run.py' ; then + echo "SHUTDOWN the pulpito server" + fi +} + +function setup_pulpito() { + local pulpito=http://localhost:8081/ + + local pulpito_dir=$(dirname $0)/../../../pulpito + + if curl --silent $pulpito | grep -q pulpito ; then + echo "OK pulpito is running" + return 0 + fi + + if ! test -d $pulpito_dir ; then + git clone https://github.com/ceph/pulpito.git $pulpito_dir || return 1 + fi + + sudo apt-get -qq install -y --force-yes nginx + local nginx_conf=/etc/nginx/sites-available/default + sudo sed -i '/text\/plain/a\ text\/plain log;' \ + /etc/nginx/mime.types + sudo perl -pi -e 's|root /var/www/html|root /usr/share/nginx/html|' $nginx_conf + if ! 
grep -qq 'autoindex on' $nginx_conf ; then + sudo perl -pi -e 's|location / {|location / { autoindex on;|' $nginx_conf + sudo /etc/init.d/nginx restart + sudo rm -f /usr/share/nginx/html/*.html + echo "ADDED autoindex on to nginx configuration" + fi + sudo chown $USER /usr/share/nginx/html + ( + cd $pulpito_dir || return 1 + git pull --rebase + git clean -ffqdx + sed -e "s|paddles_address.*|paddles_address = 'http://localhost:8080'|" < config.py.in > prod.py + virtualenv ./virtualenv + source ./virtualenv/bin/activate + pip install --upgrade pip + pip install 'setuptools==18.2.0' + pip install -r requirements.txt + python run.py & + ) + + echo "LAUNCHED the pulpito server" +} + +function setup_bashrc() { + if test -f ~/.bashrc && grep -qq '.bashrc_teuthology' ~/.bashrc ; then + echo "OK .bashrc_teuthology found in ~/.bashrc" + else + cat > ~/.bashrc_teuthology <<'EOF' +source $HOME/openrc.sh +source $HOME/teuthology/virtualenv/bin/activate +export HISTSIZE=500000 +export PROMPT_COMMAND='history -a' +EOF + echo 'source $HOME/.bashrc_teuthology' >> ~/.bashrc + echo "ADDED .bashrc_teuthology to ~/.bashrc" + fi +} + +function setup_ssh_config() { + if test -f ~/.ssh/config && grep -qq 'StrictHostKeyChecking no' ~/.ssh/config ; then + echo "OK ~/.ssh/config" + else + cat >> ~/.ssh/config <> ~/.ssh/authorized_keys + chmod 600 teuthology/openstack/archive-key + echo "APPEND to ~/.ssh/authorized_keys" +} + +function setup_bootscript() { + local nworkers=$1 + + local where=$(dirname $0) + + sudo cp -a $where/openstack-teuthology.init /etc/init.d/teuthology + echo NWORKERS=$1 | sudo tee /etc/default/teuthology > /dev/null + echo "CREATED init script /etc/init.d/teuthology" +} + +function setup_crontab() { + local where=$(dirname $0) + crontab $where/openstack-teuthology.cron +} + +function remove_crontab() { + crontab -r +} + +function setup_ceph_workbench() { + local url=$1 + local branch=$2 + + ( + cd $HOME + source teuthology/virtualenv/bin/activate + if test "$url" ; then + git clone -b $branch $url + cd ceph-workbench + pip install -e . + echo "INSTALLED ceph-workbench from $url" + else + pip install ceph-workbench + echo "INSTALLED ceph-workbench from pypi" + fi + mkdir -p ~/.ceph-workbench + chmod 700 ~/.ceph-workbench + cp -a $HOME/openrc.sh ~/.ceph-workbench + cp -a $HOME/.ssh/id_rsa ~/.ceph-workbench/teuthology.pem + echo "RESET ceph-workbench credentials (key & OpenStack)" + ) +} + +function get_or_create_keypair() { + local keypair=$1 + + ( + cd $HOME/.ssh + if ! test -f $keypair.pem ; then + openstack keypair delete $keypair || true + openstack keypair create $keypair > $keypair.pem || return 1 + chmod 600 $keypair.pem + fi + if ! test -f $keypair.pub ; then + if ! ssh-keygen -y -f $keypair.pem > $keypair.pub ; then + cat $keypair.pub + return 1 + fi + fi + if ! openstack keypair show $keypair > $keypair.keypair 2>&1 ; then + openstack keypair create --public-key $keypair.pub $keypair || return 1 # noqa + else + fingerprint=$(ssh-keygen -l -f $keypair.pub | cut -d' ' -f2) + if ! 
grep --quiet $fingerprint $keypair.keypair ; then + openstack keypair delete $keypair || return 1 + openstack keypair create --public-key $keypair.pub $keypair || return 1 # noqa + fi + fi + ln -f $keypair.pem id_rsa + cat $keypair.pub >> authorized_keys + ) +} + +function delete_keypair() { + local keypair=$1 + + if openstack keypair show $keypair > /dev/null 2>&1 ; then + openstack keypair delete $keypair || return 1 + echo "REMOVED keypair $keypair" + fi +} + +function setup_dnsmasq() { + local provider=$1 + local dev=$2 + + if ! test -f /etc/dnsmasq.d/resolv ; then + resolver=$(grep nameserver /etc/resolv.conf | head -1 | perl -ne 'print $1 if(/\s*nameserver\s+([\d\.]+)/)') + sudo apt-get -qq install -y --force-yes dnsmasq resolvconf + # FIXME: this opens up dnsmasq to DNS reflection/amplification attacks, and can be reverted + # FIXME: once we figure out how to configure dnsmasq to accept DNS queries from all subnets + sudo perl -pi -e 's/--local-service//' /etc/init.d/dnsmasq + echo resolv-file=/etc/dnsmasq-resolv.conf | sudo tee /etc/dnsmasq.d/resolv + echo nameserver $resolver | sudo tee /etc/dnsmasq-resolv.conf + # restart is not always picking up changes + sudo /etc/init.d/dnsmasq stop || true + sudo /etc/init.d/dnsmasq start + sudo sed -ie 's/^#IGNORE_RESOLVCONF=yes/IGNORE_RESOLVCONF=yes/' /etc/default/dnsmasq + echo nameserver 127.0.0.1 | sudo tee /etc/resolvconf/resolv.conf.d/head + sudo resolvconf -u + if test $provider = cloudlab ; then + sudo perl -pi -e 's/.*(prepend domain-name-servers 127.0.0.1;)/\1/' /etc/dhcp/dhclient.conf + sudo bash -c "ifdown $dev ; ifup $dev" + fi + echo "INSTALLED dnsmasq and configured to be a resolver" + else + echo "OK dnsmasq installed" + fi +} + +function subnet_names_and_ips() { + local subnet=$1 + python -c 'import netaddr; print("\n".join([str(i) for i in netaddr.IPNetwork("'$subnet'")]))' | + sed -e 's/\./ /g' | while read a b c d ; do + printf "target%03d%03d%03d%03d " $a $b $c $d + echo $a.$b.$c.$d + done +} + +function define_dnsmasq() { + local subnets="$1" + local labdomain=$2 + local host_records=/etc/dnsmasq.d/teuthology + if ! test -f $host_records ; then + for subnet in $subnets ; do + subnet_names_and_ips $subnet | while read name ip ; do + echo host-record=$name.$labdomain,$ip + done + done | sudo tee $host_records > /tmp/dnsmasq + head -2 /tmp/dnsmasq + echo 'etc.' + # restart is not always picking up changes + sudo /etc/init.d/dnsmasq stop || true + sudo /etc/init.d/dnsmasq start + echo "CREATED $host_records" + else + echo "OK $host_records exists" + fi +} + +function undefine_dnsmasq() { + local host_records=/etc/dnsmasq.d/teuthology + + sudo rm -f $host_records + echo "REMOVED $host_records" +} + +function setup_ansible() { + local subnets="$1" + local labdomain=$2 + local dir=/etc/ansible/hosts + if ! test -f $dir/teuthology ; then + sudo mkdir -p $dir/group_vars + echo '[testnodes]' | sudo tee $dir/teuthology + for subnet in $subnets ; do + subnet_names_and_ips $subnet | while read name ip ; do + echo $name.$labdomain + done + done | sudo tee -a $dir/teuthology > /tmp/ansible + head -2 /tmp/ansible + echo 'etc.' 
+ echo 'modify_fstab: false' | sudo tee $dir/group_vars/all.yml + echo "CREATED $dir/teuthology" + else + echo "OK $dir/teuthology exists" + fi +} + +function teardown_ansible() { + sudo rm -fr /etc/ansible/hosts/teuthology +} + +function remove_images() { + glance image-list --property-filter ownedby=teuthology | grep -v -e ---- -e 'Disk Format' | cut -f4 -d ' ' | while read image ; do + echo "DELETED image $image" + glance image-delete $image + done +} + +function install_packages() { + + if ! test -f /etc/apt/sources.list.d/trusty-backports.list ; then + echo deb http://archive.ubuntu.com/ubuntu trusty-backports main universe | sudo tee /etc/apt/sources.list.d/trusty-backports.list + sudo apt-get update + fi + + local packages="jq realpath curl" + sudo apt-get -qq install -y --force-yes $packages + + echo "INSTALL required packages $packages" +} + +CAT=${CAT:-cat} + +function verify_openstack() { + if ! openstack server list > /dev/null ; then + echo ERROR: the credentials from ~/openrc.sh are not working >&2 + return 1 + fi + echo "OK $OS_TENANT_NAME can use $OS_AUTH_URL" >&2 + local provider + if echo $OS_AUTH_URL | grep -qq cloud.ovh.net ; then + provider=ovh + elif echo $OS_AUTH_URL | grep -qq entercloudsuite.com ; then + provider=entercloudsuite + elif echo $OS_AUTH_URL | grep -qq cloudlab.us ; then + provider=cloudlab + else + provider=any + fi + echo "OPENSTACK PROVIDER $provider" >&2 + echo $provider +} + +function main() { + local network + local subnets + local nameserver + local labdomain=teuthology + local nworkers=2 + local keypair=teuthology + local selfname=teuthology + local server_name=teuthology + local server_group=teuthology + local worker_group=teuthology + local package_repo=packages-repository + local archive_upload + local ceph_workbench_git_url + local ceph_workbench_branch + + local do_setup_keypair=false + local do_apt_get_update=false + local do_setup_docker=false + local do_setup_salt_master=false + local do_ceph_workbench=false + local do_create_config=false + local do_setup_dnsmasq=false + local do_install_packages=false + local do_setup_paddles=false + local do_populate_paddles=false + local do_setup_pulpito=false + local do_clobber=false + local canonical_tags=true + + export LC_ALL=C + + while [ $# -ge 1 ]; do + case $1 in + --verbose) + set -x + PS4='${FUNCNAME[0]}: $LINENO: ' + ;; + --nameserver) + shift + nameserver=$1 + ;; + --subnets) + shift + subnets=$1 + ;; + --labdomain) + shift + labdomain=$1 + ;; + --network) + shift + network=$1 + ;; + --nworkers) + shift + nworkers=$1 + ;; + --archive-upload) + shift + archive_upload=$1 + ;; + --ceph-workbench-git-url) + shift + ceph_workbench_git_url=$1 + ;; + --ceph-workbench-branch) + shift + ceph_workbench_branch=$1 + ;; + --install) + do_install_packages=true + ;; + --config) + do_create_config=true + ;; + --setup-docker) + do_apt_get_update=true + do_setup_docker=true + ;; + --setup-salt-master) + do_apt_get_update=true + do_setup_salt_master=true + ;; + --server-name) + shift + server_name=$1 + ;; + --server-group) + shift + server_group=$1 + ;; + --worker-group) + shift + worker_group=$1 + ;; + --package-repo) + shift + package_repo=$1 + ;; + --selfname) + shift + selfname=$1 + ;; + --keypair) + shift + keypair=$1 + ;; + --setup-keypair) + do_setup_keypair=true + ;; + --setup-ceph-workbench) + do_ceph_workbench=true + ;; + --setup-dnsmasq) + do_setup_dnsmasq=true + ;; + --setup-fail2ban) + do_setup_fail2ban=true + ;; + --setup-paddles) + do_setup_paddles=true + ;; + --setup-pulpito) + 
do_setup_pulpito=true + ;; + --populate-paddles) + do_populate_paddles=true + ;; + --setup-all) + do_install_packages=true + do_ceph_workbench=true + do_create_config=true + do_setup_keypair=true + do_apt_get_update=true + do_setup_docker=true + do_setup_salt_master=true + do_setup_dnsmasq=true + do_setup_fail2ban=true + do_setup_paddles=true + do_setup_pulpito=true + do_populate_paddles=true + ;; + --clobber) + do_clobber=true + ;; + --no-canonical-tags) + canonical_tags=false + ;; + *) + echo $1 is not a known option + return 1 + ;; + esac + shift + done + + if $do_install_packages ; then + install_packages || return 1 + fi + + local provider=$(verify_openstack) + + # + # assume the first available IPv4 subnet is going to be used to assign IP to the instance + # + [ -z "$network" ] && { + local default_subnets=$(openstack subnet list --ip-version 4 -f json | jq -r '.[] | .Subnet' | sort | uniq) + } || { + local network_id=$(openstack network list -f json | jq -r ".[] | select(.Name == \"$network\") | .ID") + local default_subnets=$(openstack subnet list --ip-version 4 -f json \ + | jq -r ".[] | select(.Network == \"$network_id\") | .Subnet" | sort | uniq) + } + subnets=$(echo $subnets $default_subnets) + + case $provider in + entercloudsuite) + eval network=$(neutron net-list -f json | jq '.[] | select(.subnets | contains("'$subnet'")) | .name') + ;; + cloudlab) + network='flat-lan-1-net' + subnet='10.11.10.0/24' + ;; + esac + + local ip + for dev in eth0 ens3 ; do + ip=$(ip a show dev $dev 2>/dev/null | sed -n "s:.*inet \(.*\)/.*:\1:p") + test "$ip" && break + done + : ${nameserver:=$ip} + + if $do_create_config ; then + create_config "$network" "$subnets" "$nameserver" "$labdomain" "$ip" \ + "$archive_upload" "$canonical_tags" "$selfname" "$keypair" \ + "$server_name" "$server_group" "$worker_group" "$package_repo" || return 1 + setup_ansible "$subnets" $labdomain || return 1 + setup_ssh_config || return 1 + setup_authorized_keys || return 1 + setup_bashrc || return 1 + setup_bootscript $nworkers || return 1 + setup_crontab || return 1 + fi + + if $do_setup_keypair ; then + get_or_create_keypair $keypair || return 1 + fi + + if $do_ceph_workbench ; then + setup_ceph_workbench $ceph_workbench_git_url $ceph_workbench_branch || return 1 + fi + + if $do_apt_get_update ; then + apt_get_update || return 1 + fi + + if test $provider != "cloudlab" && $do_setup_docker ; then + setup_docker || return 1 + fi + + if $do_setup_salt_master ; then + setup_salt_master || return 1 + fi + + if $do_setup_fail2ban ; then + setup_fail2ban || return 1 + fi + + if $do_setup_dnsmasq ; then + setup_dnsmasq $provider $dev || return 1 + define_dnsmasq "$subnets" $labdomain || return 1 + fi + + if $do_setup_paddles ; then + setup_paddles $ip || return 1 + fi + + if $do_populate_paddles ; then + populate_paddles "$subnets" $labdomain || return 1 + fi + + if $do_setup_pulpito ; then + setup_pulpito || return 1 + fi + + if $do_clobber ; then + undefine_dnsmasq || return 1 + delete_keypair $keypair || return 1 + teardown_paddles || return 1 + teardown_pulpito || return 1 + teardown_ansible || return 1 + remove_images || return 1 + remove_crontab || return 1 + fi +} + +main "$@" diff --git a/teuthology/openstack/test/__init__.py b/teuthology/openstack/test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/openstack/test/archive-on-error.yaml b/teuthology/openstack/test/archive-on-error.yaml new file mode 100644 index 0000000000..f9f5247926 --- /dev/null +++ 
b/teuthology/openstack/test/archive-on-error.yaml @@ -0,0 +1 @@ +archive-on-error: true diff --git a/teuthology/openstack/test/noop.yaml b/teuthology/openstack/test/noop.yaml new file mode 100644 index 0000000000..6aae7ec906 --- /dev/null +++ b/teuthology/openstack/test/noop.yaml @@ -0,0 +1,12 @@ +stop_worker: true +machine_type: openstack +os_type: ubuntu +os_version: "14.04" +roles: +- - mon.a + - osd.0 +tasks: +- exec: + mon.a: + - echo "Well done !" + diff --git a/teuthology/openstack/test/openstack-integration.py b/teuthology/openstack/test/openstack-integration.py new file mode 100644 index 0000000000..41dde59b22 --- /dev/null +++ b/teuthology/openstack/test/openstack-integration.py @@ -0,0 +1,286 @@ +# +# Copyright (c) 2015, 2016 Red Hat, Inc. +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# +import argparse +import logging +import json +import os +import subprocess +import tempfile +import shutil + +import teuthology.lock +import teuthology.lock.cli +import teuthology.lock.query +import teuthology.lock.util +import teuthology.nuke +import teuthology.misc +import teuthology.schedule +import teuthology.suite +import teuthology.openstack +import scripts.schedule +import scripts.lock +import scripts.suite +from teuthology.config import config as teuth_config +from teuthology.config import set_config_attr + + +class Integration(object): + + @classmethod + def setup_class(self): + teuthology.log.setLevel(logging.DEBUG) + set_config_attr(argparse.Namespace()) + self.teardown_class() + + @classmethod + def teardown_class(self): + os.system("sudo /etc/init.d/beanstalkd restart") + # if this fails it will not show the error but some weird + # INTERNALERROR> IndexError: list index out of range + # move that to def tearDown for debug and when it works move it + # back in tearDownClass so it is not called on every test + ownedby = "ownedby='" + teuth_config.openstack['ip'] + all_instances = teuthology.openstack.OpenStack().run( + "server list -f json --long") + for instance in json.loads(all_instances): + if ownedby in instance['Properties']: + teuthology.openstack.OpenStack().run( + "server delete --wait " + instance['ID']) + + def setup_worker(self): + self.logs = self.d + "/log" + os.mkdir(self.logs, 0o755) + self.archive = self.d + "/archive" + os.mkdir(self.archive, 0o755) + self.worker_cmd = ("teuthology-worker --tube openstack " + + "-l " + self.logs + " " + "--archive-dir " + self.archive + " ") + logging.info(self.worker_cmd) + self.worker = subprocess.Popen(self.worker_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) + + def wait_worker(self): + if not self.worker: + return + + (stdoutdata, stderrdata) = self.worker.communicate() + stdoutdata = stdoutdata.decode('utf-8') + stderrdata = stderrdata.decode('utf-8') + logging.info(self.worker_cmd + ":" + + " stdout " + stdoutdata + + " stderr " + stderrdata + " end ") + assert self.worker.returncode == 0 + self.worker = None + + def get_teuthology_log(self): + # the archive is removed before each test, there must + # be only one run and one job + run = os.listdir(self.archive)[0] + job = os.listdir(os.path.join(self.archive, run))[0] + path = os.path.join(self.archive, run, job, 'teuthology.log') + return open(path, 'r').read() + +class TestSuite(Integration): + + def setup(self): + self.d = tempfile.mkdtemp() + self.setup_worker() + logging.info("TestSuite: done worker") + + def teardown(self): + self.wait_worker() + shutil.rmtree(self.d) + + def test_suite_noop(self): + cwd = os.getcwd() + os.mkdir(self.d + '/upload', 0o755) + upload = 'localhost:' + self.d + '/upload' + args = ['--suite', 'noop', + '--suite-dir', cwd + '/teuthology/openstack/test', + '--machine-type', 'openstack', + '--archive-upload', upload, + '--verbose'] + logging.info("TestSuite:test_suite_noop") + scripts.suite.main(args) + self.wait_worker() + log = self.get_teuthology_log() + assert "teuthology.run:pass" in log + assert "Well done" in log + upload_key = teuth_config.archive_upload_key + if upload_key: + ssh = "RSYNC_RSH='ssh -i " + upload_key + "'" + else: + ssh = '' + assert 'teuthology.log' in teuthology.misc.sh(ssh + " rsync -av " + upload) + + def test_suite_nuke(self): + cwd = os.getcwd() + args = ['--suite', 'nuke', + '--suite-dir', cwd + '/teuthology/openstack/test', + '--machine-type', 'openstack', + '--verbose'] + 
logging.info("TestSuite:test_suite_nuke") + scripts.suite.main(args) + self.wait_worker() + log = self.get_teuthology_log() + assert "teuthology.run:FAIL" in log + locks = teuthology.lock.query.list_locks(locked=True) + assert len(locks) == 0 + +class TestSchedule(Integration): + + def setup(self): + self.d = tempfile.mkdtemp() + self.setup_worker() + + def teardown(self): + self.wait_worker() + shutil.rmtree(self.d) + + def test_schedule_stop_worker(self): + job = 'teuthology/openstack/test/stop_worker.yaml' + args = ['--name', 'fake', + '--verbose', + '--owner', 'test@test.com', + '--worker', 'openstack', + job] + scripts.schedule.main(args) + self.wait_worker() + + def test_schedule_noop(self): + job = 'teuthology/openstack/test/noop.yaml' + args = ['--name', 'fake', + '--verbose', + '--owner', 'test@test.com', + '--worker', 'openstack', + job] + scripts.schedule.main(args) + self.wait_worker() + log = self.get_teuthology_log() + assert "teuthology.run:pass" in log + assert "Well done" in log + + def test_schedule_resources_hint(self): + """It is tricky to test resources hint in a provider agnostic way. The + best way seems to ask for at least 1GB of RAM and 10GB + disk. Some providers do not offer a 1GB RAM flavor (OVH for + instance) and the 2GB RAM will be chosen instead. It however + seems unlikely that a 4GB RAM will be chosen because it would + mean such a provider has nothing under that limit and it's a + little too high. + + Since the default when installing is to ask for 7000 MB, we + can reasonably assume that the hint has been taken into + account if the instance has less than 4GB RAM. + """ + try: + teuthology.openstack.OpenStack().run("volume list") + job = 'teuthology/openstack/test/resources_hint.yaml' + has_cinder = True + except subprocess.CalledProcessError: + job = 'teuthology/openstack/test/resources_hint_no_cinder.yaml' + has_cinder = False + args = ['--name', 'fake', + '--verbose', + '--owner', 'test@test.com', + '--worker', 'openstack', + job] + scripts.schedule.main(args) + self.wait_worker() + log = self.get_teuthology_log() + assert "teuthology.run:pass" in log + assert "RAM size ok" in log + if has_cinder: + assert "Disk size ok" in log + +class TestLock(Integration): + + def setup(self): + self.options = ['--verbose', + '--machine-type', 'openstack' ] + + def test_main(self): + args = scripts.lock.parse_args(self.options + ['--lock']) + assert teuthology.lock.cli.main(args) == 0 + + def test_lock_unlock(self): + default_archs = teuthology.openstack.OpenStack().get_available_archs() + if 'TEST_IMAGES' in os.environ: + images = os.environ['TEST_IMAGES'].split() + else: + images = teuthology.openstack.OpenStack.image2url.keys() + for image in images: + (os_type, os_version, arch) = image.split('-') + if arch not in default_archs: + logging.info("skipping " + image + " because arch " + + " is not supported (" + str(default_archs) + ")") + continue + args = scripts.lock.parse_args(self.options + + ['--lock-many', '1', + '--os-type', os_type, + '--os-version', os_version, + '--arch', arch]) + assert teuthology.lock.cli.main(args) == 0 + locks = teuthology.lock.query.list_locks(locked=True) + assert len(locks) == 1 + args = scripts.lock.parse_args(self.options + + ['--unlock', locks[0]['name']]) + assert teuthology.lock.cli.main(args) == 0 + + def test_list(self, capsys): + args = scripts.lock.parse_args(self.options + ['--list', '--all']) + teuthology.lock.cli.main(args) + out, err = capsys.readouterr() + assert 'machine_type' in out + assert 'openstack' in out + 
+class TestNuke(Integration): + + def setup(self): + self.options = ['--verbose', + '--machine-type', 'openstack'] + + def test_nuke(self): + image = next(iter(teuthology.openstack.OpenStack.image2url.keys())) + + (os_type, os_version, arch) = image.split('-') + args = scripts.lock.parse_args(self.options + + ['--lock-many', '1', + '--os-type', os_type, + '--os-version', os_version]) + assert teuthology.lock.cli.main(args) == 0 + locks = teuthology.lock.query.list_locks(locked=True) + logging.info('list_locks = ' + str(locks)) + assert len(locks) == 1 + ctx = argparse.Namespace(name=None, + config={ + 'targets': { locks[0]['name']: None }, + }, + owner=locks[0]['locked_by'], + teuthology_config={}) + teuthology.nuke.nuke(ctx, should_unlock=True) + locks = teuthology.lock.query.list_locks(locked=True) + assert len(locks) == 0 diff --git a/teuthology/openstack/test/resources_hint.yaml b/teuthology/openstack/test/resources_hint.yaml new file mode 100644 index 0000000000..b8f595964b --- /dev/null +++ b/teuthology/openstack/test/resources_hint.yaml @@ -0,0 +1,25 @@ +stop_worker: true +machine_type: openstack +openstack: + - machine: + disk: 10 # GB + ram: 10000 # MB + cpus: 1 + volumes: + count: 1 + size: 2 # GB +os_type: ubuntu +os_version: "14.04" +roles: +- - mon.a + - osd.0 +tasks: +- exec: + mon.a: + - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok" + - cat /proc/meminfo +# wait for the attached volume to show up + - for delay in 1 2 4 8 16 32 64 128 256 512 ; do if test -e /sys/block/vdb/size ; then break ; else sleep $delay ; fi ; done +# 4000000 because 512 bytes sectors + - test $(cat /sys/block/vdb/size) -gt 4000000 && echo "Disk" "size" "ok" + - cat /sys/block/vdb/size diff --git a/teuthology/openstack/test/resources_hint_no_cinder.yaml b/teuthology/openstack/test/resources_hint_no_cinder.yaml new file mode 100644 index 0000000000..c603804a53 --- /dev/null +++ b/teuthology/openstack/test/resources_hint_no_cinder.yaml @@ -0,0 +1,20 @@ +stop_worker: true +machine_type: openstack +openstack: + - machine: + disk: 10 # GB + ram: 10000 # MB + cpus: 1 + volumes: + count: 0 + size: 2 # GB +os_type: ubuntu +os_version: "14.04" +roles: +- - mon.a + - osd.0 +tasks: +- exec: + mon.a: + - cat /proc/meminfo + - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok" diff --git a/teuthology/openstack/test/stop_worker.yaml b/teuthology/openstack/test/stop_worker.yaml new file mode 100644 index 0000000000..45133bb00a --- /dev/null +++ b/teuthology/openstack/test/stop_worker.yaml @@ -0,0 +1 @@ +stop_worker: true diff --git a/teuthology/openstack/test/suites/noop/+ b/teuthology/openstack/test/suites/noop/+ new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/openstack/test/suites/noop/noop.yaml b/teuthology/openstack/test/suites/noop/noop.yaml new file mode 100644 index 0000000000..49497c2282 --- /dev/null +++ b/teuthology/openstack/test/suites/noop/noop.yaml @@ -0,0 +1,9 @@ +stop_worker: true +roles: +- - mon.a + - osd.0 +tasks: +- exec: + mon.a: + - echo "Well done !" 
+ diff --git a/teuthology/openstack/test/suites/nuke/+ b/teuthology/openstack/test/suites/nuke/+ new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/openstack/test/suites/nuke/nuke.yaml b/teuthology/openstack/test/suites/nuke/nuke.yaml new file mode 100644 index 0000000000..9ffd7ac5c9 --- /dev/null +++ b/teuthology/openstack/test/suites/nuke/nuke.yaml @@ -0,0 +1,8 @@ +stop_worker: true +nuke-on-error: true +roles: +- - client.0 +tasks: +- exec: + client.0: + - exit 1 diff --git a/teuthology/openstack/test/test_config.py b/teuthology/openstack/test/test_config.py new file mode 100644 index 0000000000..5fddeedf06 --- /dev/null +++ b/teuthology/openstack/test/test_config.py @@ -0,0 +1,35 @@ +from teuthology.config import config + + +class TestOpenStack(object): + + def setup(self): + self.openstack_config = config['openstack'] + + def test_config_clone(self): + assert 'clone' in self.openstack_config + + def test_config_user_data(self): + os_type = 'rhel' + os_version = '7.0' + template_path = self.openstack_config['user-data'].format( + os_type=os_type, + os_version=os_version) + assert os_type in template_path + assert os_version in template_path + + def test_config_ip(self): + assert 'ip' in self.openstack_config + + def test_config_machine(self): + assert 'machine' in self.openstack_config + machine_config = self.openstack_config['machine'] + assert 'disk' in machine_config + assert 'ram' in machine_config + assert 'cpus' in machine_config + + def test_config_volumes(self): + assert 'volumes' in self.openstack_config + volumes_config = self.openstack_config['volumes'] + assert 'count' in volumes_config + assert 'size' in volumes_config diff --git a/teuthology/openstack/test/test_openstack.py b/teuthology/openstack/test/test_openstack.py new file mode 100644 index 0000000000..7e5f4359cb --- /dev/null +++ b/teuthology/openstack/test/test_openstack.py @@ -0,0 +1,1695 @@ +# +# Copyright (c) 2015,2016 Red Hat, Inc. +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# +import argparse +import logging +import os +import pytest +import subprocess +import tempfile +import time +from mock import patch + +import teuthology +from teuthology import misc +from teuthology.config import set_config_attr +from teuthology.openstack import TeuthologyOpenStack, OpenStack, OpenStackInstance +from teuthology.openstack import NoFlavorException +import scripts.openstack + + +class TestOpenStackBase(object): + + def setup(self): + OpenStack.token = None + OpenStack.token_expires = None + self.environ = {} + for k in os.environ.keys(): + if k.startswith('OS_'): + self.environ[k] = os.environ[k] + + def teardown(self): + OpenStack.token = None + OpenStack.token_expires = None + for k in os.environ.keys(): + if k.startswith('OS_'): + if k in self.environ: + os.environ[k] = self.environ[k] + else: + del os.environ[k] + +class TestOpenStackInstance(TestOpenStackBase): + + teuthology_instance = """ +{ + "OS-EXT-STS:task_state": null, + "addresses": "Ext-Net=167.114.233.32", + "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)", + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000", + "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)", + "id": "f3ca32d7-212b-458b-a0d4-57d1085af953", + "security_groups": [ + { + "name": "default" + } + ], + "user_id": "3a075820e5d24fda96cd340b87fd94e9", + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "project_id": "62cf1be03cec403c8ed8e64df55732ea", + "config_drive": "", + "status": "ACTIVE", + "updated": "2015-11-03T13:48:53Z", + "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d", + "OS-SRV-USG:terminated_at": null, + "key_name": "loic", + "properties": "", + "OS-EXT-AZ:availability_zone": "nova", + "name": "mrdarkdragon", + "created": "2015-08-17T12:21:31Z", + "os-extended-volumes:volumes_attached": [{"id": "627e2631-fbb3-48cd-b801-d29cd2a76f74"}, {"id": "09837649-0881-4ee2-a560-adabefc28764"}, {"id": "44e5175b-6044-40be-885a-c9ddfb6f75bb"}] +} + """ + + teuthology_instance_no_addresses = """ +{ + "OS-EXT-STS:task_state": null, + "addresses": "", + "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)", + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000", + "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)", + "id": "f3ca32d7-212b-458b-a0d4-57d1085af953", + "security_groups": [ + { + "name": "default" + } + ], + "user_id": "3a075820e5d24fda96cd340b87fd94e9", + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "project_id": "62cf1be03cec403c8ed8e64df55732ea", + "config_drive": "", + "status": "ACTIVE", + "updated": "2015-11-03T13:48:53Z", + "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d", + "OS-SRV-USG:terminated_at": null, + "key_name": "loic", + "properties": "", + "OS-EXT-AZ:availability_zone": "nova", + "name": "mrdarkdragon", + "created": "2015-08-17T12:21:31Z", + "os-extended-volumes:volumes_attached": [] +} + """ + + @classmethod + def setup_class(self): + if 'OS_AUTH_URL' not in os.environ: + pytest.skip('no OS_AUTH_URL environment variable') + + def test_init(self): + with patch.multiple( + misc, + sh=lambda cmd: self.teuthology_instance, + ): + o = OpenStackInstance('NAME') + assert o['id'] == 'f3ca32d7-212b-458b-a0d4-57d1085af953' + o = OpenStackInstance('NAME', {"id": "OTHER"}) + assert o['id'] == "OTHER" + + def test_get_created(self): + 
with patch.multiple( + misc, + sh=lambda cmd: self.teuthology_instance, + ): + o = OpenStackInstance('NAME') + assert o.get_created() > 0 + + def test_exists(self): + with patch.multiple( + misc, + sh=lambda cmd: self.teuthology_instance, + ): + o = OpenStackInstance('NAME') + assert o.exists() + def sh_raises(cmd): + raise subprocess.CalledProcessError('FAIL', 'BAD') + with patch.multiple( + misc, + sh=sh_raises, + ): + o = OpenStackInstance('NAME') + assert not o.exists() + + def test_volumes(self): + with patch.multiple( + misc, + sh=lambda cmd: self.teuthology_instance, + ): + o = OpenStackInstance('NAME') + assert len(o.get_volumes()) == 3 + + def test_get_addresses(self): + answers = [ + self.teuthology_instance_no_addresses, + self.teuthology_instance, + ] + def sh(self): + return answers.pop(0) + with patch.multiple( + misc, + sh=sh, + ): + o = OpenStackInstance('NAME') + assert o.get_addresses() == 'Ext-Net=167.114.233.32' + + def test_get_ip_neutron(self): + instance_id = '8e1fd70a-3065-46f8-9c30-84dc028c1834' + ip = '10.10.10.4' + def sh(cmd): + if 'neutron subnet-list' in cmd: + return """ +[ + { + "ip_version": 6, + "id": "c45b9661-b2ba-4817-9e3a-f8f63bf32989" + }, + { + "ip_version": 4, + "id": "e03a3dbc-afc8-4b52-952e-7bf755397b50" + } +] + """ + elif 'neutron port-list' in cmd: + return (""" +[ + { + "device_id": "915504ad-368b-4cce-be7c-4f8a83902e28", + "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.1\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc::1\\"}" + }, + { + "device_id": "{instance_id}", + "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"{ip}\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe07:76c1\\"}" + }, + { + "device_id": "17e4a968-4caa-4cee-8e4b-f950683a02bd", + "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.5\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe9c:37f0\\"}" + } +] + """.replace('{instance_id}', instance_id). 
+ replace('{ip}', ip)) + else: + raise Exception("unexpected " + cmd) + with patch.multiple( + misc, + sh=sh, + ): + assert ip == OpenStackInstance( + instance_id, + { 'id': instance_id }, + ).get_ip_neutron() + +class TestOpenStack(TestOpenStackBase): + + flavors = """[ + { + "Name": "eg-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 800, + "ID": "008f75de-c467-4d15-8f70-79c8fbe19538" + }, + { + "Name": "hg-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 1600, + "ID": "0297d7ac-fe6f-4ff1-b6e7-0b8b0908c94f" + }, + { + "Name": "win-sp-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "039e31f2-6541-46c8-85cf-7f47fab7ad78" + }, + { + "Name": "win-sp-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "0417a0e6-f68a-4b8b-a642-ca5ecb9652f7" + }, + { + "Name": "hg-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 800, + "ID": "042aefc6-b713-4a7e-ada5-3ff81daa1960" + }, + { + "Name": "win-sp-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "0609290c-ad2a-40f0-8c66-c755dd38fe3f" + }, + { + "Name": "win-eg-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 800, + "ID": "0651080f-5d07-44b1-a759-7ea4594b669e" + }, + { + "Name": "win-sp-240", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 1600, + "ID": "07885848-8831-486d-8525-91484c09cc7e" + }, + { + "Name": "win-hg-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "079aa0a2-5e48-4e58-8205-719bc962736e" + }, + { + "Name": "eg-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 1600, + "ID": "090f8b8c-673c-4ab8-9a07-6e54a8776e7b" + }, + { + "Name": "win-hg-15-ssd-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "10e10c58-d29f-4ff6-a1fd-085c35a3bd9b" + }, + { + "Name": "eg-15-ssd", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "1340a920-0f2f-4c1b-8d74-e2502258da73" + }, + { + "Name": "win-eg-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "13e54752-fbd0-47a6-aa93-e5a67dfbc743" + }, + { + "Name": "eg-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "15c07a54-2dfb-41d9-aa73-6989fd8cafc2" + }, + { + "Name": "win-eg-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "15e0dfcc-10f4-4e70-8ac1-30bc323273e2" + }, + { + "Name": "vps-ssd-1", + "RAM": 2000, + "Ephemeral": 0, + "VCPUs": 1, + "Is Public": true, + "Disk": 10, + "ID": "164fcc7e-7771-414f-a607-b388cb7b7aa0" + }, + { + "Name": "win-sp-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "169415e1-0979-4527-94fb-638c885bbd8c" + }, + { + "Name": "win-hg-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "16f13d5b-be27-4b8b-88da-959d3904d3ba" + }, + { + "Name": "win-sp-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "1788102b-ab80-4a0c-b819-541deaca7515" + }, + { + "Name": "win-sp-240-flex", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + 
"Disk": 50, + "ID": "17bcfa14-135f-442f-9397-a4dc25265560" + }, + { + "Name": "win-eg-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "194ca9ba-04af-4d86-ba37-d7da883a7eab" + }, + { + "Name": "win-eg-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "19ff8837-4751-4f6c-a82b-290bc53c83c1" + }, + { + "Name": "win-eg-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "1aaef5e5-4df9-4462-80d3-701683ab9ff0" + }, + { + "Name": "eg-15", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "1cd85b81-5e4d-477a-a127-eb496b1d75de" + }, + { + "Name": "hg-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 1600, + "ID": "1f1efedf-ec91-4a42-acd7-f5cf64b02d3c" + }, + { + "Name": "hg-15-ssd-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "20347a07-a289-4c07-a645-93cb5e8e2d30" + }, + { + "Name": "win-eg-7-ssd", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "20689394-bd77-4f4d-900e-52cc8a86aeb4" + }, + { + "Name": "win-sp-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "21104d99-ba7b-47a0-9133-7e884710089b" + }, + { + "Name": "win-sp-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "23c21ecc-9ee8-4ad3-bd9f-aa17a3faf84e" + }, + { + "Name": "win-hg-15-ssd", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "24e293ed-bc54-4f26-8fb7-7b9457d08e66" + }, + { + "Name": "eg-15-ssd-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "25f3534a-89e5-489d-aa8b-63f62e76875b" + }, + { + "Name": "win-eg-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "291173f1-ea1d-410b-8045-667361a4addb" + }, + { + "Name": "sp-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "2b646463-2efa-428b-94ed-4059923c3636" + }, + { + "Name": "win-eg-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "2c74df82-29d2-4b1a-a32c-d5633e7359b4" + }, + { + "Name": "win-eg-15-ssd", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "2fe4344f-d701-4bc4-8dcd-6d0b5d83fa13" + }, + { + "Name": "sp-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "31487b30-eeb6-472f-a9b6-38ace6587ebc" + }, + { + "Name": "win-sp-240-ssd", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "325b602f-ecc4-4444-90bd-5a2cf4e0da53" + }, + { + "Name": "win-hg-7", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "377ded36-491f-4ad7-9eb4-876798b2aea9" + }, + { + "Name": "sp-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "382f2831-4dba-40c4-bb7a-6fadff71c4db" + }, + { + "Name": "hg-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "3c1d6170-0097-4b5c-a3b3-adff1b7a86e0" + }, + { + "Name": "hg-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "3c669730-b5cd-4e44-8bd2-bc8d9f984ab2" + }, + { + 
"Name": "sp-240-ssd-flex", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "3d66fea3-26f2-4195-97ab-fdea3b836099" + }, + { + "Name": "sp-240-flex", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "40c781f7-d7a7-4b0d-bcca-5304aeabbcd9" + }, + { + "Name": "hg-7-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "42730e52-147d-46b8-9546-18e31e5ac8a9" + }, + { + "Name": "eg-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "463f30e9-7d7a-4693-944f-142067cf553b" + }, + { + "Name": "hg-15-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "534f07c6-91af-44c8-9e62-156360fe8359" + }, + { + "Name": "win-sp-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "55533fdf-ad57-4aa7-a2c6-ee31bb94e77b" + }, + { + "Name": "win-hg-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "58b24234-3804-4c4f-9eb6-5406a3a13758" + }, + { + "Name": "hg-7-ssd-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "596c1276-8e53-40a0-b183-cdd9e9b1907d" + }, + { + "Name": "win-hg-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "5c54dc08-28b9-4860-9f24-a2451b2a28ec" + }, + { + "Name": "eg-7", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "5e409dbc-3f4b-46e8-a629-a418c8497922" + }, + { + "Name": "hg-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "656423ea-0551-48c6-9e0f-ec6e15952029" + }, + { + "Name": "hg-15", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "675558ea-04fe-47a2-83de-40be9b2eacd4" + }, + { + "Name": "eg-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "68a8e4e1-d291-46e8-a724-fbb1c4b9b051" + }, + { + "Name": "hg-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "6ab72807-e0a5-4e9f-bbb9-7cbbf0038b26" + }, + { + "Name": "win-hg-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "6e12cae3-0492-483c-aa39-54a0dcaf86dd" + }, + { + "Name": "win-hg-7-ssd", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "6ead771c-e8b9-424c-afa0-671280416422" + }, + { + "Name": "win-hg-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "70ded741-8f58-4bb9-8cfd-5e838b66b5f3" + }, + { + "Name": "win-sp-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "7284d104-a260-421d-8cee-6dc905107b25" + }, + { + "Name": "win-eg-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 800, + "ID": "72c0b262-855d-40bb-a3e9-fd989a1bc466" + }, + { + "Name": "win-hg-7-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "73961591-c5f1-436f-b641-1a506eddaef4" + }, + { + "Name": "sp-240-ssd", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "7568d834-3b16-42ce-a2c1-0654e0781160" + }, + { + "Name": "win-eg-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is 
Public": true, + "Disk": 800, + "ID": "75f7fe5c-f87a-41d8-a961-a0169d02c268" + }, + { + "Name": "eg-7-ssd-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "77e1db73-0b36-4e37-8e47-32c2d2437ca9" + }, + { + "Name": "eg-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "78df4e30-98ca-4362-af68-037d958edaf0" + }, + { + "Name": "vps-ssd-2", + "RAM": 4000, + "Ephemeral": 0, + "VCPUs": 1, + "Is Public": true, + "Disk": 20, + "ID": "7939cc5c-79b1-45c0-be2d-aa935d92faa1" + }, + { + "Name": "sp-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "80d8510a-79cc-4307-8db7-d1965c9e8ddb" + }, + { + "Name": "win-hg-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "835e734a-46b6-4cb2-be68-e8678fd71059" + }, + { + "Name": "win-eg-7", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "84869b00-b43a-4523-babd-d47d206694e9" + }, + { + "Name": "win-eg-7-ssd-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "852308f8-b8bf-44a4-af41-cbc27437b275" + }, + { + "Name": "win-sp-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "8be9dc29-3eca-499b-ae2d-e3c99699131a" + }, + { + "Name": "win-hg-7-ssd-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "8d704cfd-05b2-4d4a-add2-e2868bcc081f" + }, + { + "Name": "eg-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "901f77c2-73f6-4fae-b28a-18b829b55a17" + }, + { + "Name": "sp-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "944b92fb-9a0c-406d-bb9f-a1d93cda9f01" + }, + { + "Name": "eg-30-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "965472c7-eb54-4d4d-bd6e-01ebb694a631" + }, + { + "Name": "sp-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "97824a8c-e683-49a8-a70a-ead64240395c" + }, + { + "Name": "hg-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "9831d7f1-3e79-483d-8958-88e3952c7ea2" + }, + { + "Name": "eg-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 1600, + "ID": "9e1f13d0-4fcc-4abc-a9e6-9c76d662c92d" + }, + { + "Name": "win-eg-30-ssd", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 400, + "ID": "9e6b85fa-6f37-45ce-a3d6-11ab40a28fad" + }, + { + "Name": "hg-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "9ed787cc-a0db-400b-8cc1-49b6384a1000" + }, + { + "Name": "sp-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "9f3cfdf7-b850-47cc-92be-33aefbfd2b05" + }, + { + "Name": "hg-60-ssd-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "a37bdf17-e1b1-41cc-a67f-ed665a120446" + }, + { + "Name": "win-hg-120-ssd", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 800, + "ID": "aa753e73-dadb-4528-9c4a-24e36fc41bf4" + }, + { + "Name": "win-sp-240-ssd-flex", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 50, + "ID": "abc007b8-cc44-4b6b-9606-fd647b03e101" 
+ }, + { + "Name": "sp-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "ac74cb45-d895-47dd-b9cf-c17778033d83" + }, + { + "Name": "win-hg-15", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "ae900175-72bd-4fbc-8ab2-4673b468aa5b" + }, + { + "Name": "win-eg-15-ssd-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "aeb37dbf-d7c9-4fd7-93f1-f3818e488ede" + }, + { + "Name": "hg-7-ssd", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "b1dc776c-b6e3-4a96-b230-850f570db3d5" + }, + { + "Name": "sp-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "b24df495-10f3-466e-95ab-26f0f6839a2f" + }, + { + "Name": "win-hg-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 1600, + "ID": "b798e44e-bf71-488c-9335-f20bf5976547" + }, + { + "Name": "eg-7-ssd", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 100, + "ID": "b94e6623-913d-4147-b2a3-34ccf6fe7a5e" + }, + { + "Name": "eg-15-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "bb5fdda8-34ec-40c8-a4e3-308b9e2c9ee2" + }, + { + "Name": "win-eg-7-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "c65384f6-4665-461a-a292-2f3f5a016244" + }, + { + "Name": "eg-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 800, + "ID": "c678f1a8-6542-4f9d-89af-ffc98715d674" + }, + { + "Name": "hg-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "d147a094-b653-41e7-9250-8d4da3044334" + }, + { + "Name": "sp-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "d1acf88d-6f55-4c5c-a914-4ecbdbd50d6b" + }, + { + "Name": "sp-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "d2d33e8e-58b1-4661-8141-826c47f82166" + }, + { + "Name": "hg-120-ssd-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "d7322c37-9881-4a57-9b40-2499fe2e8f42" + }, + { + "Name": "win-hg-15-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "daf597ea-fbbc-4c71-a35e-5b41d33ccc6c" + }, + { + "Name": "win-hg-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "dcfd834c-3932-47a3-8b4b-cdfeecdfde2c" + }, + { + "Name": "win-hg-60", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 1600, + "ID": "def75cbd-a4b1-4f82-9152-90c65df9587b" + }, + { + "Name": "eg-30-ssd-flex", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 50, + "ID": "e04c7ad6-a5de-45f5-93c9-f3343bdfe8d1" + }, + { + "Name": "vps-ssd-3", + "RAM": 8000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 40, + "ID": "e43d7458-6b82-4a78-a712-3a4dc6748cf4" + }, + { + "Name": "win-eg-15-flex", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "e8bd3402-7310-4a0f-8b99-d9212359c957" + }, + { + "Name": "win-eg-30", + "RAM": 30000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "ebf7a997-e2f8-42f4-84f7-33a3d53d1af9" + }, + { + "Name": "eg-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is 
Public": true, + "Disk": 50, + "ID": "ec852ed3-1e42-4c59-abc3-12bcd26abec8" + }, + { + "Name": "sp-240", + "RAM": 240000, + "Ephemeral": 0, + "VCPUs": 16, + "Is Public": true, + "Disk": 1600, + "ID": "ed286e2c-769f-4c47-ac52-b8de7a4891f6" + }, + { + "Name": "win-sp-60-ssd", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "ed835a73-d9a0-43ee-bd89-999c51d8426d" + }, + { + "Name": "win-eg-15", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 400, + "ID": "f06056c1-a2d4-40e7-a7d8-e5bfabada72e" + }, + { + "Name": "win-sp-120", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 8, + "Is Public": true, + "Disk": 800, + "ID": "f247dc56-395b-49de-9a62-93ccc4fff4ed" + }, + { + "Name": "eg-7-flex", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 50, + "ID": "f476f959-ffa6-46f2-94d8-72293570604d" + }, + { + "Name": "sp-60-flex", + "RAM": 60000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 50, + "ID": "f52db47a-315f-49d4-bc5c-67dd118e7ac0" + }, + { + "Name": "win-hg-120-flex", + "RAM": 120000, + "Ephemeral": 0, + "VCPUs": 32, + "Is Public": true, + "Disk": 50, + "ID": "f6cb8144-5d98-4057-b44f-46da342fb571" + }, + { + "Name": "hg-7", + "RAM": 7000, + "Ephemeral": 0, + "VCPUs": 2, + "Is Public": true, + "Disk": 200, + "ID": "fa3cc551-0358-4170-be64-56ea432b064c" + }, + { + "Name": "hg-15-ssd", + "RAM": 15000, + "Ephemeral": 0, + "VCPUs": 4, + "Is Public": true, + "Disk": 200, + "ID": "ff48c2cf-c17f-4682-aaf6-31d66786f808" + } + ]""" + + @classmethod + def setup_class(self): + if 'OS_AUTH_URL' not in os.environ: + pytest.skip('no OS_AUTH_URL environment variable') + + @patch('teuthology.misc.sh') + def test_sorted_flavors(self, m_sh): + o = OpenStack() + select = '^(vps|hg)-.*ssd' + m_sh.return_value = TestOpenStack.flavors + flavors = o.get_sorted_flavors('arch', select) + assert [u'vps-ssd-1', + u'vps-ssd-2', + u'hg-7-ssd-flex', + u'hg-7-ssd', + u'vps-ssd-3', + u'hg-15-ssd-flex', + u'hg-15-ssd', + u'hg-30-ssd-flex', + u'hg-30-ssd', + u'hg-60-ssd-flex', + u'hg-60-ssd', + u'hg-120-ssd-flex', + u'hg-120-ssd', + ] == [ f['Name'] for f in flavors ] + m_sh.assert_called_with("openstack --quiet flavor list -f json") + + def test_flavor(self): + def get_sorted_flavors(self, arch, select): + return [ + { + 'Name': 'too_small', + 'RAM': 2048, + 'Disk': 50, + 'VCPUs': 1, + }, + ] + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + with pytest.raises(NoFlavorException): + hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 } + OpenStack().flavor(hint, 'arch') + + flavor = 'good-flavor' + def get_sorted_flavors(self, arch, select): + return [ + { + 'Name': flavor, + 'RAM': 2048, + 'Disk': 50, + 'VCPUs': 2, + }, + ] + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 } + assert flavor == OpenStack().flavor(hint, 'arch') + + def test_flavor_range(self): + flavors = [ + { + 'Name': 'too_small', + 'RAM': 2048, + 'Disk': 50, + 'VCPUs': 1, + }, + ] + def get_sorted_flavors(self, arch, select): + return flavors + + min = { 'ram': 1000, 'disk': 40, 'cpus': 2 } + good = { 'ram': 4000, 'disk': 40, 'cpus': 2 } + + # + # there are no flavors in the required range + # + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + with pytest.raises(NoFlavorException): + OpenStack().flavor_range(min, good, 'arch') + + # + # there is one flavor in the required range + # + flavors.append({ + 
'Name': 'min', + 'RAM': 2048, + 'Disk': 40, + 'VCPUs': 2, + }) + + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + + assert 'min' == OpenStack().flavor_range(min, good, 'arch') + + # + # out of the two flavors in the required range, get the bigger one + # + flavors.append({ + 'Name': 'good', + 'RAM': 3000, + 'Disk': 40, + 'VCPUs': 2, + }) + + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + + assert 'good' == OpenStack().flavor_range(min, good, 'arch') + + # + # there is one flavor bigger or equal to good, get this one + # + flavors.append({ + 'Name': 'best', + 'RAM': 4000, + 'Disk': 40, + 'VCPUs': 2, + }) + + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + + assert 'best' == OpenStack().flavor_range(min, good, 'arch') + + # + # there are two flavors bigger or equal to good, get the smallest one + # + flavors.append({ + 'Name': 'too_big', + 'RAM': 30000, + 'Disk': 400, + 'VCPUs': 20, + }) + + with patch.multiple( + OpenStack, + get_sorted_flavors=get_sorted_flavors, + ): + + assert 'best' == OpenStack().flavor_range(min, good, 'arch') + + + def test_interpret_hints(self): + defaults = { + 'machine': { + 'ram': 0, + 'disk': 0, + 'cpus': 0, + }, + 'volumes': { + 'count': 0, + 'size': 0, + }, + } + expected_disk = 10 # first hint larger than the second + expected_ram = 20 # second hint larger than the first + expected_cpus = 0 # not set, hence zero by default + expected_count = 30 # second hint larger than the first + expected_size = 40 # does not exist in the first hint + hints = [ + { + 'machine': { + 'ram': 2, + 'disk': expected_disk, + }, + 'volumes': { + 'count': 9, + 'size': expected_size, + }, + }, + { + 'machine': { + 'ram': expected_ram, + 'disk': 3, + }, + 'volumes': { + 'count': expected_count, + }, + }, + ] + hint = OpenStack().interpret_hints(defaults, hints) + assert hint == { + 'machine': { + 'ram': expected_ram, + 'disk': expected_disk, + 'cpus': expected_cpus, + }, + 'volumes': { + 'count': expected_count, + 'size': expected_size, + } + } + assert defaults == OpenStack().interpret_hints(defaults, None) + + def test_get_provider(self): + auth = os.environ.get('OS_AUTH_URL', None) + os.environ['OS_AUTH_URL'] = 'cloud.ovh.net' + assert OpenStack().get_provider() == 'ovh' + if auth != None: + os.environ['OS_AUTH_URL'] = auth + else: + del os.environ['OS_AUTH_URL'] + + def test_get_os_url(self): + o = OpenStack() + # + # Only for OVH + # + o.provider = 'something' + assert "" == o.get_os_url("server ") + o.provider = 'ovh' + assert "" == o.get_os_url("unknown ") + type2cmd = { + 'compute': ('server', 'flavor'), + 'network': ('ip', 'security', 'network'), + 'image': ('image',), + 'volume': ('volume',), + } + os.environ['OS_REGION_NAME'] = 'REGION' + os.environ['OS_TENANT_ID'] = 'TENANT' + for (type, cmds) in type2cmd.items(): + for cmd in cmds: + assert ("//" + type) in o.get_os_url(cmd + " ") + for type in type2cmd.keys(): + assert ("//" + type) in o.get_os_url("whatever ", type=type) + + @patch('teuthology.misc.sh') + def test_cache_token(self, m_sh): + token = 'TOKEN VALUE' + m_sh.return_value = token + OpenStack.token = None + o = OpenStack() + # + # Only for OVH + # + o.provider = 'something' + assert False == o.cache_token() + o.provider = 'ovh' + # + # Set the environment with the token + # + assert 'OS_TOKEN_VALUE' not in os.environ + assert 'OS_TOKEN_EXPIRES' not in os.environ + assert True == o.cache_token() + m_sh.assert_called_with('openstack -q token issue -c id -f 
value') + assert token == os.environ['OS_TOKEN_VALUE'] + assert token == OpenStack.token + assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) + assert time.time() < OpenStack.token_expires + # + # Reset after it expires + # + token_expires = int(time.time()) - 2000 + OpenStack.token_expires = token_expires + assert True == o.cache_token() + assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) + assert time.time() < OpenStack.token_expires + + @patch('teuthology.misc.sh') + def test_cache_token_from_environment(self, m_sh): + OpenStack.token = None + o = OpenStack() + o.provider = 'ovh' + token = 'TOKEN VALUE' + os.environ['OS_TOKEN_VALUE'] = token + token_expires = int(time.time()) + OpenStack.token_cache_duration + os.environ['OS_TOKEN_EXPIRES'] = str(token_expires) + assert True == o.cache_token() + assert token == OpenStack.token + assert token_expires == OpenStack.token_expires + m_sh.assert_not_called() + + @patch('teuthology.misc.sh') + def test_cache_token_expired_environment(self, m_sh): + token = 'TOKEN VALUE' + m_sh.return_value = token + OpenStack.token = None + o = OpenStack() + o.provider = 'ovh' + os.environ['OS_TOKEN_VALUE'] = token + token_expires = int(time.time()) - 2000 + os.environ['OS_TOKEN_EXPIRES'] = str(token_expires) + assert True == o.cache_token() + m_sh.assert_called_with('openstack -q token issue -c id -f value') + assert token == os.environ['OS_TOKEN_VALUE'] + assert token == OpenStack.token + assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) + assert time.time() < OpenStack.token_expires + +class TestTeuthologyOpenStack(TestOpenStackBase): + + @classmethod + def setup_class(self): + if 'OS_AUTH_URL' not in os.environ: + pytest.skip('no OS_AUTH_URL environment variable') + + teuthology.log.setLevel(logging.DEBUG) + set_config_attr(argparse.Namespace()) + + ip = TeuthologyOpenStack.create_floating_ip() + if ip: + ip_id = TeuthologyOpenStack.get_floating_ip_id(ip) + OpenStack().run("ip floating delete " + ip_id) + self.can_create_floating_ips = True + else: + self.can_create_floating_ips = False + + def setup(self): + super(TestTeuthologyOpenStack, self).setup() + self.key_filename = tempfile.mktemp() + self.key_name = 'teuthology-test' + self.name = 'teuthology-test' + self.clobber() + misc.sh(""" +openstack keypair create {key_name} > {key_filename} +chmod 600 {key_filename} + """.format(key_filename=self.key_filename, + key_name=self.key_name)) + self.options = ['--key-name', self.key_name, + '--key-filename', self.key_filename, + '--name', self.name, + '--verbose'] + + def teardown(self): + super(TestTeuthologyOpenStack, self).teardown() + self.clobber() + os.unlink(self.key_filename) + + def clobber(self): + misc.sh(""" +openstack server delete {name} --wait || true +openstack keypair delete {key_name} || true + """.format(key_name=self.key_name, + name=self.name)) + + def test_create(self, caplog): + teuthology_argv = [ + '--suite', 'upgrade/hammer', + '--dry-run', + '--ceph', 'main', + '--kernel', 'distro', + '--flavor', 'gcov', + '--distro', 'ubuntu', + '--suite-branch', 'hammer', + '--email', 'loic@dachary.org', + '--num', '10', + '--limit', '23', + '--subset', '1/2', + '--priority', '101', + '--timeout', '234', + '--filter', 'trasher', + '--filter-out', 'erasure-code', + '--throttle', '3', + ] + archive_upload = 'user@archive:/tmp' + argv = (self.options + + ['--teuthology-git-url', 'TEUTHOLOGY_URL', + '--teuthology-branch', 'TEUTHOLOGY_BRANCH', + '--ceph-workbench-git-url', 'CEPH_WORKBENCH_URL', + '--ceph-workbench-branch', 
'CEPH_WORKBENCH_BRANCH', + '--upload', + '--archive-upload', archive_upload] + + teuthology_argv) + args = scripts.openstack.parse_args(argv) + teuthology_argv.extend([ + '--archive-upload', archive_upload, + '--archive-upload-url', args.archive_upload_url, + ]) + teuthology = TeuthologyOpenStack(args, None, argv) + teuthology.user_data = 'teuthology/openstack/test/user-data-test1.txt' + teuthology.teuthology_suite = 'echo --' + + teuthology.main() + assert 0 == teuthology.ssh("lsb_release -a") + assert 0 == teuthology.ssh("grep 'substituded variables' /var/log/cloud-init.log") + l = caplog.text + assert 'Ubuntu 14.04' in l + assert "nworkers=" + str(args.simultaneous_jobs) in l + assert "username=" + teuthology.username in l + assert "upload=--archive-upload user@archive:/tmp" in l + assert ("ceph_workbench=" + " --ceph-workbench-branch CEPH_WORKBENCH_BRANCH" + " --ceph-workbench-git-url CEPH_WORKBENCH_URL") in l + assert "clone=git clone -b TEUTHOLOGY_BRANCH TEUTHOLOGY_URL" in l + assert os.environ['OS_AUTH_URL'] in l + assert " ".join(teuthology_argv) in l + + if self.can_create_floating_ips: + ip = teuthology.get_floating_ip(self.name) + teuthology.teardown() + if self.can_create_floating_ips: + assert teuthology.get_floating_ip_id(ip) == None + + def test_floating_ip(self): + if not self.can_create_floating_ips: + pytest.skip('unable to create floating ips') + + expected = TeuthologyOpenStack.create_floating_ip() + ip = TeuthologyOpenStack.get_unassociated_floating_ip() + assert expected == ip + ip_id = TeuthologyOpenStack.get_floating_ip_id(ip) + OpenStack().run("ip floating delete " + ip_id) diff --git a/teuthology/openstack/test/user-data-test1.txt b/teuthology/openstack/test/user-data-test1.txt new file mode 100644 index 0000000000..4e3e466c21 --- /dev/null +++ b/teuthology/openstack/test/user-data-test1.txt @@ -0,0 +1,5 @@ +#cloud-config +system_info: + default_user: + name: ubuntu +final_message: "teuthology is up and running after $UPTIME seconds, substituded variables nworkers=NWORKERS openrc=OPENRC username=TEUTHOLOGY_USERNAME upload=UPLOAD ceph_workbench=CEPH_WORKBENCH clone=CLONE_OPENSTACK" diff --git a/teuthology/orchestra/__init__.py b/teuthology/orchestra/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/orchestra/cluster.py b/teuthology/orchestra/cluster.py new file mode 100644 index 0000000000..654ef0c3de --- /dev/null +++ b/teuthology/orchestra/cluster.py @@ -0,0 +1,188 @@ +""" +Cluster definition +part of context, Cluster is used to save connection information. +""" +from teuthology.orchestra import run + +class Cluster(object): + """ + Manage SSH connections to a cluster of machines. 
+ """ + + def __init__(self, remotes=None): + """ + :param remotes: A sequence of 2-tuples of this format: + (Remote, [role_1, role_2 ...]) + """ + self.remotes = {} + if remotes is not None: + for remote, roles in remotes: + self.add(remote, roles) + + def __repr__(self): + remotes = [(k, v) for k, v in self.remotes.items()] + remotes.sort(key=lambda tup: tup[0].name) + remotes = '[' + ', '.join('[{remote!r}, {roles!r}]'.format( + remote=k, roles=v) for k, v in remotes) + ']' + return '{classname}(remotes={remotes})'.format( + classname=self.__class__.__name__, + remotes=remotes, + ) + + def __str__(self): + remotes = list(self.remotes.items()) + remotes.sort(key=lambda tup: tup[0].name) + remotes = ((k, ','.join(v)) for k, v in remotes) + remotes = ('{k}[{v}]'.format(k=k, v=v) for k, v in remotes) + return ' '.join(remotes) + + def add(self, remote, roles): + """ + Add roles to the list of remotes. + """ + if remote in self.remotes: + raise RuntimeError( + 'Remote {new!r} already found in remotes: {old!r}'.format( + new=remote, + old=self.remotes[remote], + ), + ) + self.remotes[remote] = list(roles) + + def run(self, wait=True, parallel=False, **kwargs): + """ + Run a command on all the nodes in this cluster. + + Goes through nodes in alphabetical order. + + The default usage is when parallel=False and wait=True, + which is a sequential run for each node one by one. + + If you specify parallel=True, it will be in parallel. + + If you specify wait=False, it returns immediately. + Since it is not possible to run sequentially and + do not wait each command run finished, the parallel value + is ignored and treated as True. + + Returns a list of `RemoteProcess`. + """ + # -+-------+----------+----------+------------+--------------- + # | wait | parallel | run.wait | remote.run | comments + # -+-------+----------+----------+------------+--------------- + # 1|*True |*False | no | wait=True | sequentially + # 2| True | True | yes | wait=False | parallel + # 3| False | True | no | wait=False | parallel + # 4| False | False | no | wait=False | same as above + + # We always run in parallel if wait=False, + # that is why (4) is equivalent to (3). + + # We wait from remote.run only if run sequentially. + _wait = (parallel == False and wait == True) + + remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name) + procs = [remote.run(**kwargs, wait=_wait) for remote in remotes] + + # We do run.wait only if parallel=True, because if parallel=False, + # we have run sequentially and all processes are complete. + + if parallel and wait: + run.wait(procs) + return procs + + def sh(self, script, **kwargs): + """ + Run a command on all the nodes in this cluster. + + Goes through nodes in alphabetical order. + + Returns a list of the command outputs correspondingly. + """ + remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name) + return [remote.sh(script, **kwargs) for remote in remotes] + + def write_file(self, file_name, content, sudo=False, perms=None, owner=None): + """ + Write text to a file on each node. 
+ + :param file_name: file name + :param content: file content + :param sudo: use sudo + :param perms: file permissions (passed to chmod) ONLY if sudo is True + """ + remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name) + for remote in remotes: + if sudo: + remote.write_file(file_name, content, + sudo=True, mode=perms, owner=owner) + else: + if perms is not None or owner is not None: + raise ValueError("To specify perms or owner, sudo must be True") + remote.write_file(file_name, content) + + def only(self, *roles): + """ + Return a cluster with only the remotes that have all of given roles. + + For roles given as strings, they are matched against the roles + on a remote, and the remote passes the check only if all the + roles listed are present. + + Argument can be callable, and will act as a match on roles of + the remote. The matcher will be evaluated one role at a time, + but a match on any role is good enough. Note that this is + subtly diffent from the behavior of string roles, but is + logical if you consider a callable to be similar to passing a + non-string object with an `__eq__` method. + + For example:: + + web = mycluster.only(lambda role: role.startswith('web-')) + """ + c = self.__class__() + want = frozenset(r for r in roles if not callable(r)) + matchers = [r for r in roles if callable(r)] + + for remote, has_roles in self.remotes.items(): + # strings given as roles must all match + if frozenset(has_roles) & want != want: + # not a match + continue + + # every matcher given must match at least one role + if not all( + any(matcher(role) for role in has_roles) + for matcher in matchers + ): + continue + + c.add(remote, has_roles) + + return c + + def exclude(self, *roles): + """ + Return a cluster *without* remotes that have all of given roles. + + This is the opposite of `only`. + """ + matches = self.only(*roles) + c = self.__class__() + for remote, has_roles in self.remotes.items(): + if remote not in matches.remotes: + c.add(remote, has_roles) + return c + + def filter(self, func): + """ + Return a cluster whose remotes are filtered by `func`. + + Example:: + cluster = ctx.cluster.filter(lambda r: r.is_online) + """ + result = self.__class__() + for rem, roles in self.remotes.items(): + if func(rem): + result.add(rem, roles) + return result diff --git a/teuthology/orchestra/connection.py b/teuthology/orchestra/connection.py new file mode 100644 index 0000000000..d9706b5959 --- /dev/null +++ b/teuthology/orchestra/connection.py @@ -0,0 +1,110 @@ +""" +Connection utilities +""" +import paramiko +import os +import logging + +from teuthology.config import config +from teuthology.contextutil import safe_while +from paramiko.hostkeys import HostKeyEntry + +log = logging.getLogger(__name__) + + +def split_user(user_at_host): + """ + break apart user@host fields into user and host. + """ + try: + user, host = user_at_host.rsplit('@', 1) + except ValueError: + user, host = None, user_at_host + assert user != '', \ + "Bad input to split_user: {user_at_host!r}".format(user_at_host=user_at_host) + return user, host + + +def create_key(keytype, key): + """ + Create an ssh-rsa, ssh-dss or ssh-ed25519 key. + """ + l = "{hostname} {keytype} {key}".format(hostname="x", keytype=keytype, key=key) + + ke = HostKeyEntry.from_line(l) + assert ke, f'invalid host key "{keytype} {key}"' + return ke.key + + +def connect(user_at_host, host_key=None, keep_alive=False, timeout=60, + _SSHClient=None, _create_key=None, retry=True, key_filename=None): + """ + ssh connection routine. 
+ + :param user_at_host: user@host + :param host_key: ssh key + :param keep_alive: keep_alive indicator + :param timeout: timeout in seconds + :param _SSHClient: client, default is paramiko ssh client + :param _create_key: routine to create a key (defaults to local reate_key) + :param retry: Whether or not to retry failed connection attempts + (eventually giving up if none succeed). Default is True + :param key_filename: Optionally override which private key to use. + :return: ssh connection. + """ + user, host = split_user(user_at_host) + if _SSHClient is None: + _SSHClient = paramiko.SSHClient + ssh = _SSHClient() + + if _create_key is None: + _create_key = create_key + + if host_key is None: + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if config.verify_host_keys is True: + ssh.load_system_host_keys() + + else: + keytype, key = host_key.split(' ', 1) + ssh.get_host_keys().add( + hostname=host, + keytype=keytype, + key=_create_key(keytype, key) + ) + + connect_args = dict( + hostname=host, + username=user, + timeout=timeout + ) + + ssh_config_path = os.path.expanduser("~/.ssh/config") + if os.path.exists(ssh_config_path): + ssh_config = paramiko.SSHConfig() + ssh_config.parse(open(ssh_config_path)) + opts = ssh_config.lookup(host) + if not key_filename and 'identityfile' in opts: + key_filename = opts['identityfile'] + + if key_filename: + if not isinstance(key_filename, list): + key_filename = [key_filename] + key_filename = [os.path.expanduser(f) for f in key_filename] + connect_args['key_filename'] = key_filename + + log.debug(connect_args) + + if not retry: + ssh.connect(**connect_args) + else: + # Retries are implemented using safe_while + with safe_while(sleep=1, action='connect to ' + host) as proceed: + while proceed(): + try: + ssh.connect(**connect_args) + break + except paramiko.AuthenticationException as e: + log.error(f"Error authenticating with {host}: {str(e)}") + ssh.get_transport().set_keepalive(keep_alive) + return ssh diff --git a/teuthology/orchestra/console.py b/teuthology/orchestra/console.py new file mode 100644 index 0000000000..2c594f279d --- /dev/null +++ b/teuthology/orchestra/console.py @@ -0,0 +1,388 @@ +import logging +import os +import pexpect +import psutil +import subprocess +import sys +import time + +import teuthology.lock.query +import teuthology.lock.util +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.exceptions import ConsoleError +from teuthology.misc import host_shortname + +try: + import libvirt +except ImportError: + libvirt = None + +log = logging.getLogger(__name__) + + +class RemoteConsole(): + def getShortName(self, name=None): + """ + Extract the name portion from remote name strings. 
+ """ + hostname = (name or self.name).split('@')[-1] + return host_shortname(hostname) + + +class PhysicalConsole(RemoteConsole): + """ + Physical Console (set from getRemoteConsole) + """ + def __init__(self, name, ipmiuser=None, ipmipass=None, ipmidomain=None, + logfile=None, timeout=40): + self.name = name + self.shortname = self.getShortName(name) + self.timeout = timeout + self.logfile = None + self.ipmiuser = ipmiuser or config.ipmi_user + self.ipmipass = ipmipass or config.ipmi_password + self.ipmidomain = ipmidomain or config.ipmi_domain + self.has_ipmi_credentials = all( + [self.ipmiuser, self.ipmipass, self.ipmidomain] + ) + self.conserver_master = config.conserver_master + self.conserver_port = config.conserver_port + conserver_client_found = psutil.Popen( + 'which console', + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT).wait() == 0 + self.has_conserver = all([ + config.use_conserver is not False, + self.conserver_master, + self.conserver_port, + conserver_client_found, + ]) + + def _pexpect_spawn_ipmi(self, ipmi_cmd): + """ + Run the cmd specified using ipmitool. + """ + full_command = self._ipmi_command(ipmi_cmd) + return self._pexpect_spawn(full_command) + + def _pexpect_spawn(self, cmd): + """ + Run a command using pexpect.spawn(). Return the child object. + """ + log.debug('pexpect command: %s', cmd) + return pexpect.spawn( + cmd, + logfile=self.logfile, + ) + + def _get_console(self, readonly=True): + def start(): + cmd = self._console_command(readonly=readonly) + return self._pexpect_spawn(cmd) + + child = start() + if self.has_conserver and not child.isalive(): + log.error("conserver failed to get the console; will try ipmitool") + self.has_conserver = False + child = start() + return child + + def _console_command(self, readonly=True): + if self.has_conserver: + return 'console -M {master} -p {port} {mode} {host}'.format( + master=self.conserver_master, + port=self.conserver_port, + mode='-s' if readonly else '-f', + host=self.shortname, + ) + else: + return self._ipmi_command('sol activate') + + def _ipmi_command(self, subcommand): + self._check_ipmi_credentials() + template = \ + 'ipmitool -H {s}.{dn} -I lanplus -U {ipmiuser} -P {ipmipass} {cmd}' + return template.format( + cmd=subcommand, + s=self.shortname, + dn=self.ipmidomain, + ipmiuser=self.ipmiuser, + ipmipass=self.ipmipass, + ) + + def _check_ipmi_credentials(self): + if not self.has_ipmi_credentials: + log.error( + "Must set ipmi_user, ipmi_password, and ipmi_domain in " + ".teuthology.yaml" + ) + + def _exit_session(self, child, timeout=None): + t = timeout or self.timeout + if self.has_conserver: + child.sendcontrol('e') + child.send('c.') + r = child.expect( + ['[disconnect]', pexpect.TIMEOUT, pexpect.EOF], + timeout=t) + if r != 0: + child.kill(15) + else: + child.send('~.') + r = child.expect( + ['terminated ipmitool', pexpect.TIMEOUT, pexpect.EOF], + timeout=t) + if r != 0: + self._pexpect_spawn_ipmi('sol deactivate') + + def _wait_for_login(self, timeout=None, attempts=2): + """ + Wait for login. Retry if timeouts occur on commands. 
+ """ + t = timeout or self.timeout + log.debug('Waiting for login prompt on {s}'.format(s=self.shortname)) + # wait for login prompt to indicate boot completed + for i in range(0, attempts): + start = time.time() + while time.time() - start < t: + child = self._get_console(readonly=False) + child.send('\n') + log.debug('expect: {s} login'.format(s=self.shortname)) + r = child.expect( + ['{s} login: '.format(s=self.shortname), + pexpect.TIMEOUT, + pexpect.EOF], + timeout=(t - (time.time() - start))) + log.debug('expect before: {b}'.format(b=child.before)) + log.debug('expect after: {a}'.format(a=child.after)) + + self._exit_session(child) + if r == 0: + return + raise ConsoleError("Did not get a login prompt from %s!" % self.name) + + def check_power(self, state, timeout=None): + """ + Check power. Retry if EOF encountered on power check read. + """ + timeout = timeout or self.timeout + sleep_time = 4.0 + with safe_while( + sleep=sleep_time, + tries=int(timeout / sleep_time), + _raise=False, + action='wait for power %s' % state) as proceed: + while proceed(): + c = self._pexpect_spawn_ipmi('power status') + r = c.expect(['Chassis Power is {s}'.format( + s=state), pexpect.EOF, pexpect.TIMEOUT], timeout=1) + if r == 0: + return True + return False + + def check_status(self, timeout=None): + """ + Check status. Returns True if console is at login prompt + """ + try: + # check for login prompt at console + self._wait_for_login(timeout) + return True + except Exception as e: + log.info('Failed to get ipmi console status for {s}: {e}'.format( + s=self.shortname, e=e)) + return False + + def power_cycle(self, timeout=300): + """ + Power cycle and wait for login. + + :param timeout: How long to wait for login + """ + log.info('Power cycling {s}'.format(s=self.shortname)) + child = self._pexpect_spawn_ipmi('power cycle') + child.expect('Chassis Power Control: Cycle', timeout=self.timeout) + self._wait_for_login(timeout=timeout) + log.info('Power cycle for {s} completed'.format(s=self.shortname)) + + def hard_reset(self, wait_for_login=True): + """ + Perform physical hard reset. Retry if EOF returned from read + and wait for login when complete. + """ + log.info('Performing hard reset of {s}'.format(s=self.shortname)) + start = time.time() + while time.time() - start < self.timeout: + child = self._pexpect_spawn_ipmi('power reset') + r = child.expect(['Chassis Power Control: Reset', pexpect.EOF], + timeout=self.timeout) + if r == 0: + break + if wait_for_login: + self._wait_for_login() + log.info('Hard reset for {s} completed'.format(s=self.shortname)) + + def power_on(self): + """ + Physical power on. Loop checking cmd return. + """ + log.info('Power on {s}'.format(s=self.shortname)) + start = time.time() + while time.time() - start < self.timeout: + child = self._pexpect_spawn_ipmi('power on') + r = child.expect(['Chassis Power Control: Up/On', pexpect.EOF], + timeout=self.timeout) + if r == 0: + break + if self.check_power('on'): + log.info('Power on for {s} completed'.format(s=self.shortname)) + else: + err_msg = 'Failed to power on {s}'.format(s=self.shortname) + raise RuntimeError(err_msg) + + def power_off(self): + """ + Physical power off. Loop checking cmd return. 
+ """ + log.info('Power off {s}'.format(s=self.shortname)) + start = time.time() + while time.time() - start < self.timeout: + child = self._pexpect_spawn_ipmi('power off') + r = child.expect(['Chassis Power Control: Down/Off', pexpect.EOF], + timeout=self.timeout) + if r == 0: + break + if self.check_power('off', 60): + log.info('Power off for {s} completed'.format(s=self.shortname)) + else: + log.error('Failed to power off {s}'.format(s=self.shortname)) + + def power_off_for_interval(self, interval=30): + """ + Physical power off for an interval. Wait for login when complete. + + :param interval: Length of power-off period. + """ + log.info('Power off {s} for {i} seconds'.format( + s=self.shortname, i=interval)) + child = self._pexpect_spawn_ipmi('power off') + child.expect('Chassis Power Control: Down/Off', timeout=self.timeout) + + time.sleep(interval) + + child = self._pexpect_spawn_ipmi('power on') + child.expect('Chassis Power Control: Up/On', timeout=self.timeout) + self._wait_for_login() + log.info('Power off for {i} seconds completed'.format(i=interval)) + + def spawn_sol_log(self, dest_path): + """ + Using the subprocess module, spawn an ipmitool process using 'sol + activate' and redirect its output to a file. + + :returns: a psutil.Popen object + """ + pexpect_templ = \ + "import pexpect; " \ + "pexpect.run('{cmd}', logfile=open('{log}', 'wb'), timeout=None)" + + def start(): + console_cmd = self._console_command() + # use sys.executable to find python rather than /usr/bin/env. + # The latter relies on PATH, which is set in a virtualenv + # that's been activated, but is not set when binaries are + # run directly from the virtualenv's bin/ directory. + python_cmd = [ + sys.executable, '-c', + pexpect_templ.format( + cmd=console_cmd, + log=dest_path, + ), + ] + return psutil.Popen( + python_cmd, + env=os.environ, + ) + + proc = start() + if self.has_conserver and proc.poll() is not None: + log.error("conserver failed to get the console; will try ipmitool") + self.has_conserver = False + proc = start() + return proc + + +class VirtualConsole(RemoteConsole): + """ + Virtual Console (set from getRemoteConsole) + """ + def __init__(self, name): + if libvirt is None: + raise RuntimeError("libvirt not found") + + self.shortname = self.getShortName(name) + status_info = teuthology.lock.query.get_status(self.shortname) + try: + if teuthology.lock.query.is_vm(status=status_info): + phys_host = status_info['vm_host']['name'].split('.')[0] + except TypeError: + raise RuntimeError("Cannot create a virtual console for %s", name) + self.connection = libvirt.open(phys_host) + for i in self.connection.listDomainsID(): + d = self.connection.lookupByID(i) + if d.name() == self.shortname: + self.vm_domain = d + break + return + + def check_power(self, state, timeout=None): + """ + Return true if vm domain state indicates power is on. + """ + return self.vm_domain.info[0] in [libvirt.VIR_DOMAIN_RUNNING, + libvirt.VIR_DOMAIN_BLOCKED, + libvirt.VIR_DOMAIN_PAUSED] + + def check_status(self, timeout=None): + """ + Return true if running. 
+ """ + return self.vm_domain.info()[0] == libvirt.VIR_DOMAIN_RUNNING + + def power_cycle(self): + """ + Simiulate virtual machine power cycle + """ + self.vm_domain.info().destroy() + self.vm_domain.info().create() + + def hard_reset(self): + """ + Simiulate hard reset + """ + self.vm_domain.info().destroy() + + def power_on(self): + """ + Simiulate power on + """ + self.vm_domain.info().create() + + def power_off(self): + """ + Simiulate power off + """ + self.vm_domain.info().destroy() + + def power_off_for_interval(self, interval=30): + """ + Simiulate power off for an interval. + """ + log.info('Power off {s} for {i} seconds'.format( + s=self.shortname, i=interval)) + self.vm_domain.info().destroy() + time.sleep(interval) + self.vm_domain.info().create() + log.info('Power off for {i} seconds completed'.format(i=interval)) diff --git a/teuthology/orchestra/daemon/__init__.py b/teuthology/orchestra/daemon/__init__.py new file mode 100644 index 0000000000..ff8be0c674 --- /dev/null +++ b/teuthology/orchestra/daemon/__init__.py @@ -0,0 +1 @@ +from teuthology.orchestra.daemon.group import DaemonGroup # noqa diff --git a/teuthology/orchestra/daemon/cephadmunit.py b/teuthology/orchestra/daemon/cephadmunit.py new file mode 100644 index 0000000000..9b579da08e --- /dev/null +++ b/teuthology/orchestra/daemon/cephadmunit.py @@ -0,0 +1,177 @@ +import logging + +from teuthology.orchestra.daemon.state import DaemonState + +log = logging.getLogger(__name__) + +class CephadmUnit(DaemonState): + def __init__(self, remote, role, id_, *command_args, + **command_kwargs): + super(CephadmUnit, self).__init__( + remote, role, id_, *command_args, **command_kwargs) + self._set_commands() + self.log = command_kwargs.get('logger', log) + self.use_cephadm = command_kwargs.get('use_cephadm') + self.is_started = command_kwargs.get('started', False) + if self.is_started: + self._start_logger() + + def name(self): + return '%s.%s' % (self.type_, self.id_) + + def _get_systemd_cmd(self, action): + return ' '.join([ + 'sudo', 'systemctl', + action, + 'ceph-%s@%s.%s' % (self.fsid, self.type_, self.id_), + ]) + + def _set_commands(self): + self.start_cmd = self._get_systemd_cmd('start') + self.stop_cmd = self._get_systemd_cmd('stop') + self.restart_cmd = self._get_systemd_cmd('restart') + self.show_cmd = self._get_systemd_cmd('show') + self.status_cmd = self._get_systemd_cmd('status') + + def kill_cmd(self, sig): + return ' '.join([ + 'sudo', 'docker', 'kill', + '-s', str(int(sig)), + 'ceph-%s-%s.%s' % (self.fsid, self.type_, self.id_), + ]) + + def _start_logger(self): + name = '%s.%s' % (self.type_, self.id_) + #self.log.info('_start_logger %s' % name) + self.remote_logger = self.remote.run( + args=['sudo', 'journalctl', + '-f', + '-n', '0', + '-u', + 'ceph-%s@%s.service' % (self.fsid, name) + ], + logger=logging.getLogger('journalctl@' + self.cluster + '.' 
+ name), + label=name, + wait=False, + check_status=False, + ) + + def _stop_logger(self): + name = '%s.%s' % (self.type_, self.id_) + # this is a horrible kludge, since i don't know how else to kill + # the journalctl process at the other end :( + #self.log.info('_stop_logger %s running pkill' % name) + self.remote.run( + args=['sudo', 'pkill', '-f', + ' '.join(['journalctl', + '-f', + '-n', '0', + '-u', + 'ceph-%s@%s.service' % (self.fsid, name)]), + ], + check_status=False, + ) + #self.log.info('_stop_logger %s waiting') + self.remote_logger.wait() + self.remote_logger = None + #self.log.info('_stop_logger done') + + def reset(self): + """ + Does nothing in this implementation + """ + pass + + def restart(self, *args, **kwargs): + """ + Restart with a new command passed in the arguments + + :param args: positional arguments passed to remote.run + :param kwargs: keyword arguments passed to remote.run + """ + if not self.running(): + self.log.info('Restarting %s (starting--it wasn\'t running)...' % self.name()) + self._start_logger() + self.remote.sh(self.start_cmd) + self.is_started = True + else: + self.log.info('Restarting %s...' % self.name()) + self.remote.sh(self.restart_cmd) + + def restart_with_args(self, extra_args): + """ + Restart, adding new paramaters to the current command. + + :param extra_args: Extra keyword arguments to be added. + """ + raise NotImplementedError + + def running(self): + """ + Are we running? + """ + return self.is_started + + def signal(self, sig, silent=False): + """ + Send a signal to associated remote command + + :param sig: signal to send + """ + if not silent: + self.log.info('Senging signal %d to %s...' % (sig, self.name())) + # Ignore exception here because sending a singal via docker can be + # quite slow and easily race with, say, the daemon shutting down. + try: + self.remote.sh(self.kill_cmd(sig)) + except Exception as e: + self.log.info(f'Ignoring exception while sending signal: {e}') + + def start(self, timeout=300): + """ + Start this daemon instance. + """ + if self.running(): + self.log.warning('Restarting a running daemon') + self.restart() + return + self._start_logger() + self.remote.run(self.start_cmd) + + def stop(self, timeout=300): + """ + Stop this daemon instance. + + Note: this can raise a CommandFailedError, + CommandCrashedError, or ConnectionLostError. + + :param timeout: timeout to pass to orchestra.run.wait() + """ + if not self.running(): + self.log.error('Tried to stop a non-running daemon') + return + self.log.info('Stopping %s...' % self.name()) + self.remote.sh(self.stop_cmd) + self.is_started = False + self._stop_logger() + self.log.info('Stopped %s' % self.name()) + + # FIXME why are there two wait methods? + def wait(self, timeout=300): + """ + Wait for daemon to exit + + Wait for daemon to stop (but don't trigger the stop). Pass up + any exception. Mark the daemon as not running. + """ + self.log.info('Waiting for %s to exit...' % self.name()) + self.remote.sh(self.stop_cmd) + self.is_started = False + self._stop_logger() + self.log.info('Finished waiting for %s to stop' % self.name()) + + def wait_for_exit(self): + """ + clear remote run command value after waiting for exit. 
+ """ + self.wait() diff --git a/teuthology/orchestra/daemon/group.py b/teuthology/orchestra/daemon/group.py new file mode 100644 index 0000000000..656f5a0ba1 --- /dev/null +++ b/teuthology/orchestra/daemon/group.py @@ -0,0 +1,180 @@ +from teuthology import misc +from teuthology.orchestra.daemon.state import DaemonState +from teuthology.orchestra.daemon.systemd import SystemDState +from teuthology.orchestra.daemon.cephadmunit import CephadmUnit + + +class DaemonGroup(object): + """ + Collection of daemon state instances + """ + def __init__(self, use_systemd=False, use_cephadm=None): + """ + self.daemons is a dictionary indexed by role. Each entry is a + dictionary of DaemonState values indexed by an id parameter. + + :param use_systemd: Whether or not to use systemd when appropriate + (default: False) Note: This option may be removed + in the future. + """ + self.daemons = {} + self.use_systemd = use_systemd + self.use_cephadm = use_cephadm + + def add_daemon(self, remote, type_, id_, *args, **kwargs): + """ + Add a daemon. If there already is a daemon for this id_ and role, stop + that daemon. (Re)start the daemon once the new value is set. + + :param remote: Remote site + :param type_: type of daemon (osd, mds, mon, rgw, for example) + :param id_: Id (index into role dictionary) + :param args: Daemonstate positional parameters + :param kwargs: Daemonstate keyword parameters + """ + # for backwards compatibility with older ceph-qa-suite branches, + # we can only get optional args from unused kwargs entries + self.register_daemon(remote, type_, id_, *args, **kwargs) + cluster = kwargs.pop('cluster', 'ceph') + role = cluster + '.' + type_ + self.daemons[role][id_].restart() + + def register_daemon(self, remote, type_, id_, *args, **kwargs): + """ + Add a daemon. If there already is a daemon for this id_ and role, stop + that daemon. + + :param remote: Remote site + :param type_: type of daemon (osd, mds, mon, rgw, for example) + :param id_: Id (index into role dictionary) + :param args: Daemonstate positional parameters + :param kwargs: Daemonstate keyword parameters + """ + # for backwards compatibility with older ceph-qa-suite branches, + # we can only get optional args from unused kwargs entries + cluster = kwargs.pop('cluster', 'ceph') + role = cluster + '.' + type_ + if role not in self.daemons: + self.daemons[role] = {} + if id_ in self.daemons[role]: + self.daemons[role][id_].stop() + self.daemons[role][id_] = None + + klass = DaemonState + if self.use_cephadm: + klass = CephadmUnit + kwargs['use_cephadm'] = self.use_cephadm + elif self.use_systemd and \ + not any(i == 'valgrind' for i in args) and \ + remote.init_system == 'systemd': + # We currently cannot use systemd and valgrind together because + # it would require rewriting the unit files + klass = SystemDState + self.daemons[role][id_] = klass( + remote, role, id_, *args, **kwargs) + + def get_daemon(self, type_, id_, cluster='ceph'): + """ + get the daemon associated with this id_ for this role. + + :param type_: type of daemon (osd, mds, mon, rgw, for example) + :param id_: Id (index into role dictionary) + """ + role = cluster + '.' + type_ + if role not in self.daemons: + return None + return self.daemons[role].get(str(id_), None) + + def iter_daemons_of_role(self, type_, cluster='ceph'): + """ + Iterate through all daemon instances for this role. Return dictionary + of daemon values. + + :param type_: type of daemon (osd, mds, mon, rgw, for example) + """ + role = cluster + '.' 
+ type_ + return self.daemons.get(role, {}).values() + + def resolve_role_list(self, roles, types, cluster_aware=False): + """ + Resolve a configuration setting that may be None or contain wildcards + into a list of roles (where a role is e.g. 'mds.a' or 'osd.0'). This + is useful for tasks that take user input specifying a flexible subset + of the available roles. + + The task calling this must specify what kinds of roles it can can + handle using the ``types`` argument, where a role type is 'osd' or + 'mds' for example. When selecting roles this is used as a filter, or + when an explicit list of roles is passed, the an exception is raised if + any are not of a suitable type. + + Examples: + + :: + + # Passing None (i.e. user left config blank) defaults to all roles + # (filtered by ``types``) + None, types=['osd', 'mds', 'mon'] -> + ['osd.0', 'osd.1', 'osd.2', 'mds.a', mds.b', 'mon.a'] + # Wildcards are expanded + roles=['mds.*', 'osd.0'], types=['osd', 'mds', 'mon'] -> + ['mds.a', 'mds.b', 'osd.0'] + # Boring lists are unaltered + roles=['osd.0', 'mds.a'], types=['osd', 'mds', 'mon'] -> + ['osd.0', 'mds.a'] + # Entries in role list that don't match types result in an + # exception + roles=['osd.0', 'mds.a'], types=['osd'] -> RuntimeError + + :param roles: List (of roles or wildcards) or None (select all suitable + roles) + :param types: List of acceptable role types, for example + ['osd', 'mds']. + :param cluster_aware: bool to determine whether to consider include + cluster in the returned roles - just for + backwards compatibility with pre-jewel versions + of ceph-qa-suite + :return: List of strings like ["mds.0", "osd.2"] + """ + assert (isinstance(roles, list) or roles is None) + + resolved = [] + if roles is None: + # Handle default: all roles available + for type_ in types: + for role, daemons in self.daemons.items(): + if not role.endswith('.' + type_): + continue + for daemon in daemons.values(): + prefix = type_ + if cluster_aware: + prefix = daemon.role + resolved.append(prefix + '.' + daemon.id_) + else: + # Handle explicit list of roles or wildcards + for raw_role in roles: + try: + cluster, role_type, role_id = misc.split_role(raw_role) + except ValueError: + msg = ("Invalid role '{0}', roles must be of format " + "[.].").format(raw_role) + raise RuntimeError(msg) + + if role_type not in types: + msg = "Invalid role type '{0}' in role '{1}'".format( + role_type, raw_role) + raise RuntimeError(msg) + + if role_id == "*": + # Handle wildcard, all roles of the type + for daemon in self.iter_daemons_of_role(role_type, + cluster=cluster): + prefix = role_type + if cluster_aware: + prefix = daemon.role + resolved.append(prefix + '.' + daemon.id_) + else: + # Handle explicit role + resolved.append(raw_role) + + return resolved diff --git a/teuthology/orchestra/daemon/state.py b/teuthology/orchestra/daemon/state.py new file mode 100644 index 0000000000..c3b6ddad93 --- /dev/null +++ b/teuthology/orchestra/daemon/state.py @@ -0,0 +1,171 @@ +import logging +import struct + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +class DaemonState(object): + """ + Daemon State. A daemon exists for each instance of each role. 
+ """ + def __init__(self, remote, role, id_, *command_args, **command_kwargs): + """ + Pass remote command information as parameters to remote site + + :param remote: Remote site + :param role: Role (osd, rgw, mon, mds) + :param id_: Id within role (osd.1, osd.2, for eaxmple) + :param command_args: positional arguments (used in restart commands) + :param command_kwargs: keyword arguments (used in restart commands) + """ + self.remote = remote + self.command_args = command_args + self.role = role + self.cluster, self.type_ = self.role.split('.')[0:2] + self.id_ = id_ + self.log = command_kwargs.get('logger', log) + self.fsid = command_kwargs.pop('fsid', None) + self.proc = None + self.command_kwargs = command_kwargs + + def check_status(self): + """ + Check to see if the process has exited. + + :returns: The exit status, if any + :raises: CommandFailedError, if the process was run with + check_status=True + """ + if self.proc: + return self.proc.poll() + + @property + def pid(self): + raise NotImplementedError + + def reset(self): + """ + clear remote run command value. + """ + self.proc = None + + def restart(self, *args, **kwargs): + """ + Restart with a new command passed in the arguments + + :param args: positional arguments passed to remote.run + :param kwargs: keyword arguments passed to remote.run + """ + self.log.info('Restarting daemon') + if self.proc is not None: + self.log.info('Stopping old one...') + self.stop() + cmd_args = list(self.command_args) + cmd_args.extend(args) + cmd_kwargs = self.command_kwargs + cmd_kwargs.update(kwargs) + self.proc = self.remote.run(*cmd_args, **cmd_kwargs) + self.log.info('Started') + + def restart_with_args(self, extra_args): + """ + Restart, adding new paramaters to the current command. + + :param extra_args: Extra keyword arguments to be added. + """ + self.log.info('Restarting daemon with args') + if self.proc is not None: + self.log.info('Stopping old one...') + self.stop() + cmd_args = list(self.command_args) + # we only want to make a temporary mod of the args list + # so we shallow copy the dict, and deepcopy the args list + cmd_kwargs = self.command_kwargs.copy() + from copy import deepcopy + cmd_kwargs['args'] = deepcopy(self.command_kwargs['args']) + cmd_kwargs['args'].extend(extra_args) + self.proc = self.remote.run(*cmd_args, **cmd_kwargs) + self.log.info('Started') + + def running(self): + """ + Are we running? + :return: True if remote run command value is set, False otherwise. + """ + return self.proc is not None + + def signal(self, sig, silent=False): + """ + Send a signal to associated remote command. + + :param sig: signal to send + """ + if self.running(): + try: + self.proc.stdin.write(struct.pack('!b', sig)) + except IOError as e: + log.exception('Failed to send signal %d: %s', sig, e.strerror) + if not silent: + self.log.info('Sent signal %d', sig) + else: + self.log.error('No such daemon running') + + def start(self, timeout=300): + """ + Start this daemon instance. + """ + if self.running(): + self.log.warning('Restarting a running daemon') + self.restart() + + def stop(self, timeout=300): + """ + Stop this daemon instance. + + Note: this can raise a CommandFailedError, + CommandCrashedError, or ConnectionLostError. 
+ + :param timeout: timeout to pass to orchestra.run.wait() + """ + if not self.running(): + self.log.error('tried to stop a non-running daemon') + return + self.proc.stdin.close() + self.log.debug('waiting for process to exit') + try: + run.wait([self.proc], timeout=timeout) + except CommandFailedError: + log.exception("Error while waiting for process to exit") + self.proc = None + self.log.info('Stopped') + + # FIXME why are there two wait methods? + def wait(self, timeout=300): + """ + Wait for daemon to exit + + Wait for daemon to stop (but don't trigger the stop). Pass up + any exception. Mark the daemon as not running. + """ + self.log.debug('waiting for process to exit') + try: + run.wait([self.proc], timeout=timeout) + self.log.info('Stopped') + except: + self.log.info('Failed') + raise + finally: + self.proc = None + + def wait_for_exit(self): + """ + clear remote run command value after waiting for exit. + """ + if self.proc: + try: + run.wait([self.proc]) + finally: + self.proc = None diff --git a/teuthology/orchestra/daemon/systemd.py b/teuthology/orchestra/daemon/systemd.py new file mode 100644 index 0000000000..fd833b84fb --- /dev/null +++ b/teuthology/orchestra/daemon/systemd.py @@ -0,0 +1,229 @@ +import logging +import re + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run +from teuthology.orchestra.daemon.state import DaemonState + +log = logging.getLogger(__name__) + +systemd_cmd_templ = 'sudo systemctl {action} {daemon}@{id_}' + + +class SystemDState(DaemonState): + def __init__(self, remote, role, id_, *command_args, + **command_kwargs): + super(SystemDState, self).__init__( + remote, role, id_, *command_args, **command_kwargs) + self._set_commands() + self.log = command_kwargs.get('logger', log) + + @property + def daemon_type(self): + if self.type_ == 'rgw': + return 'radosgw' + return self.type_ + + def _get_systemd_cmd(self, action): + cmd = systemd_cmd_templ.format( + action=action, + daemon='%s-%s' % (self.cluster, self.daemon_type), + id_=self.id_.replace('client.', ''), + ) + return cmd + + def _set_commands(self): + self.start_cmd = self._get_systemd_cmd('start') + self.stop_cmd = self._get_systemd_cmd('stop') + self.restart_cmd = self._get_systemd_cmd('restart') + self.show_cmd = self._get_systemd_cmd('show') + self.status_cmd = self._get_systemd_cmd('status') + cluster_and_type = '%s-%s' % (self.cluster, self.daemon_type) + if self.type_ == self.daemon_type: + syslog_id = cluster_and_type + else: + syslog_id = self.daemon_type + self.output_cmd = 'sudo journalctl -u ' \ + '{0}@{1} -t {2} -n 10'.format( + cluster_and_type, + self.id_.replace('client.', ''), + syslog_id, + ) + + def check_status(self): + """ + Check to see if the process has exited. 
+ + :returns: The exit status, if any + :raises: CommandFailedError, if the process was run with + check_status=True + """ + output = self.remote.sh(self.show_cmd + ' | grep -i state') + + def parse_line(line): + key, value = line.strip().split('=', 1) + return {key.strip(): value.strip()} + + show_dict = dict() + + for line in output.split('\n'): + # skip empty and commented string + if not line or line.startswith("#"): + continue + show_dict.update(parse_line(line)) + + active_state = show_dict['ActiveState'] + sub_state = show_dict['SubState'] + if active_state == 'active': + return None + self.log.info("State is: %s/%s", active_state, sub_state) + out = self.remote.sh( + # This will match a line like: + # Main PID: 13394 (code=exited, status=1/FAILURE) + # Or (this is wrapped): + # Apr 26 21:29:33 ovh083 systemd[1]: ceph-osd@1.service: + # Main process exited, code=exited, status=1/FAILURE + self.status_cmd + " | grep 'Main.*code=exited'", + ) + line = out.strip().split('\n')[-1] + exit_code = int(re.match('.*status=(\d+).*', line).groups()[0]) + if exit_code: + self.remote.run( + args=self.output_cmd + ) + raise CommandFailedError( + self.start_cmd, + exit_code, + self.remote, + ) + return exit_code + + @property + def pid(self): + """ + Method to retrieve daemon process id + """ + proc_name = 'ceph-%s' % self.type_ + + # process regex to match OSD, MON, MGR, MDS process command string + # eg. "/usr/bin/ceph- -f --cluster ceph --id " + proc_regex = '"%s.*--id %s "' % (proc_name, self.id_) + + # process regex to match RADOSGW process command string + # eg. "/usr/bin/radosgw -f --cluster ceph --name " + if self.type_ == "rgw": + proc_regex = '"{}.*--name.*{}"'.format(self.daemon_type, self.id_) + + args = ['ps', '-ef', + run.Raw('|'), + 'grep', + run.Raw(proc_regex), + run.Raw('|'), + 'grep', '-v', + 'grep', run.Raw('|'), + 'awk', + run.Raw("{'print $2'}")] + pid_string = self.remote.sh(args).strip() + if not pid_string.isdigit(): + return None + return int(pid_string) + + def reset(self): + """ + Does nothing in this implementation + """ + pass + + def restart(self, *args, **kwargs): + """ + Restart with a new command passed in the arguments + + :param args: positional arguments passed to remote.run + :param kwargs: keyword arguments passed to remote.run + """ + self.log.info('Restarting daemon using systemd') + if not self.running(): + self.log.info('starting a non-running daemon') + self.remote.run(args=[run.Raw(self.start_cmd)]) + else: + self.remote.run(args=[run.Raw(self.restart_cmd)]) + # check status will also fail if the process hasn't restarted + self.check_status() + + def restart_with_args(self, extra_args): + """ + Restart, adding new paramaters to the current command. + + :param extra_args: Extra keyword arguments to be added. + """ + self.log.warning( + "restart_with_args() is not supported with systemd; performing" + "normal restart") + self.restart() + + def running(self): + """ + Are we running? + :return: The PID if remote run command value is set, False otherwise. 
+ """ + pid = self.pid + if pid is None: + return None + elif pid <= 0: + return None + else: + return pid + + def signal(self, sig, silent=False): + """ + Send a signal to associated remote command + + :param sig: signal to send + """ + self.log.warning("systemd may restart daemons automatically") + pid = self.pid + self.log.info("Sending signal %s to process %s", sig, pid) + sig = '-' + str(sig) + self.remote.run(args=['sudo', 'kill', str(sig), str(pid)]) + + def start(self, timeout=300): + """ + Start this daemon instance. + """ + if self.running(): + self.log.warning('Restarting a running daemon') + self.restart() + return + self.remote.run(args=[run.Raw(self.start_cmd)]) + + def stop(self, timeout=300): + """ + Stop this daemon instance. + + Note: this can raise a CommandFailedError, + CommandCrashedError, or ConnectionLostError. + + :param timeout: timeout to pass to orchestra.run.wait() + """ + if not self.running(): + self.log.error('tried to stop a non-running daemon') + return + self.remote.run(args=[run.Raw(self.stop_cmd)]) + self.log.info('Stopped') + + # FIXME why are there two wait methods? + def wait(self, timeout=300): + """ + Wait for daemon to exit + + Wait for daemon to stop (but don't trigger the stop). Pass up + any exception. Mark the daemon as not running. + """ + self.log.error("wait() not suported in systemd") + + def wait_for_exit(self): + """ + clear remote run command value after waiting for exit. + """ + # TODO: This ought to be possible, no? + self.log.error("wait_for_exit() is not supported with systemd") diff --git a/teuthology/orchestra/monkey.py b/teuthology/orchestra/monkey.py new file mode 100644 index 0000000000..e13e77305e --- /dev/null +++ b/teuthology/orchestra/monkey.py @@ -0,0 +1,56 @@ +""" +Monkey patches (paramiko support) +""" +import logging + +log = logging.getLogger(__name__) + +def patch_001_paramiko_deprecation(): + """ + Silence an an unhelpful Deprecation Warning triggered by Paramiko. + + Not strictly a monkeypatch. + """ + import warnings + warnings.filterwarnings( + category=DeprecationWarning, + message='This application uses RandomPool,', + action='ignore', + ) + + +def patch_100_paramiko_log(): + """ + Silence some noise paramiko likes to log. + + Not strictly a monkeypatch. + """ + logging.getLogger('paramiko.transport').setLevel(logging.WARNING) + + +def patch_100_logger_getChild(): + """ + Imitate Python 2.7 feature Logger.getChild. + """ + import logging + if not hasattr(logging.Logger, 'getChild'): + def getChild(self, name): + return logging.getLogger('.'.join([self.name, name])) + logging.Logger.getChild = getChild + + +def patch_100_trigger_rekey(): + # Fixes http://tracker.ceph.com/issues/15236 + from paramiko.packet import Packetizer + Packetizer._trigger_rekey = lambda self: True + + +def patch_all(): + """ + Run all the patch_* functions in this module. 
+ """ + monkeys = [(k, v) for (k, v) in globals().items() if k.startswith('patch_') and k != 'patch_all'] + monkeys.sort() + for k, v in monkeys: + log.debug('Patching %s', k) + v() diff --git a/teuthology/orchestra/opsys.py b/teuthology/orchestra/opsys.py new file mode 100644 index 0000000000..d912657475 --- /dev/null +++ b/teuthology/orchestra/opsys.py @@ -0,0 +1,241 @@ +import re + +DISTRO_CODENAME_MAP = { + "ubuntu": { + "22.04": "jammy", + "20.04": "focal", + "18.04": "bionic", + "17.10": "artful", + "17.04": "zesty", + "16.10": "yakkety", + "16.04": "xenial", + "15.10": "wily", + "15.04": "vivid", + "14.10": "utopic", + "14.04": "trusty", + "13.10": "saucy", + "12.04": "precise", + }, + "debian": { + "7": "wheezy", + "8": "jessie", + "9": "stretch", + }, + "rhel": { + "9": "plow", + "8": "ootpa", + "7": "maipo", + "6": "santiago", + }, + "centos": { + "9": "stream", + "8": "core", + "7": "core", + "6": "core", + }, + "fedora": { + "28": "28", + "27": "27", + "26": "26", + "25": "25", + "24": "24", + "23": "23", + "22": "22", + "21": "21", + "20": "heisenbug", + }, + "opensuse": { + "15.0": "leap", + "15.1": "leap", + "15.2": "leap", + "42.2": "leap", + "42.3": "leap", + }, + "sle": { + "12.1": "sle", + "12.2": "sle", + "12.3": "sle", + "15.0": "sle", + "15.1": "sle", + "15.2": "sle", + }, +} + +DEFAULT_OS_VERSION = dict( + ubuntu="20.04", + fedora="25", + centos="8.stream", + opensuse="15.0", + sle="15.0", + rhel="8.6", + debian='8.0' +) + + +class OS(object): + """ + Class that parses either /etc/os-release or the output of 'lsb_release -a' + and provides OS name and version information. + + Must be initialized with OS.from_lsb_release or OS.from_os_release + """ + + __slots__ = ['name', 'version', 'codename', 'package_type'] + + _deb_distros = ('debian', 'ubuntu') + _rpm_distros = ('fedora', 'rhel', 'centos', 'opensuse', 'sle') + + def __init__(self, name=None, version=None, codename=None): + self.name = name + self.version = version or self._codename_to_version(name, codename) + self.codename = codename or self._version_to_codename(name, version) + self._set_package_type() + + @staticmethod + def _version_to_codename(name, version): + for (_version, codename) in DISTRO_CODENAME_MAP[name].items(): + if str(version) == _version or str(version).split('.')[0] == _version: + return codename + + @staticmethod + def _codename_to_version(name, codename): + for (version, _codename) in DISTRO_CODENAME_MAP[name].items(): + if codename == _codename: + return version + raise RuntimeError("No version found for %s %s !" 
% ( + name, + codename, + )) + + @classmethod + def from_lsb_release(cls, lsb_release_str): + """ + Parse output from lsb_release -a and populate attributes + + Given output like: + Distributor ID: Ubuntu + Description: Ubuntu 12.04.4 LTS + Release: 12.04 + Codename: precise + + Attributes will be: + name = 'ubuntu' + version = '12.04' + codename = 'precise' + Additionally, we set the package type: + package_type = 'deb' + """ + str_ = lsb_release_str.strip() + name = cls._get_value(str_, 'Distributor ID') + if name == 'RedHatEnterpriseServer': + name = 'rhel' + elif name.startswith('openSUSE'): + name = 'opensuse' + elif name.startswith('SUSE'): + name = 'sle' + name = name.lower() + + version = cls._get_value(str_, 'Release') + codename = cls._get_value(str_, 'Codename').lower() + obj = cls(name=name, version=version, codename=codename) + + return obj + + @classmethod + def from_os_release(cls, os_release_str): + """ + Parse /etc/os-release and populate attributes + + Given output like: + NAME="Ubuntu" + VERSION="12.04.4 LTS, Precise Pangolin" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu precise (12.04.4 LTS)" + VERSION_ID="12.04" + + Attributes will be: + name = 'ubuntu' + version = '12.04' + codename = None + Additionally, we set the package type: + package_type = 'deb' + """ + str_ = os_release_str.strip() + name = cls._get_value(str_, 'ID').lower() + if name == 'sles': + name = 'sle' + elif name == 'opensuse-leap': + name = 'opensuse' + elif name == 'opensuse-tumbleweed': + name = 'opensuse' + version = cls._get_value(str_, 'VERSION_ID') + obj = cls(name=name, version=version) + + return obj + + + @classmethod + def version_codename(cls, name, version_or_codename): + """ + Return (version, codename) based on one input, trying to infer + which we're given + """ + codename = None + version = None + + try: + codename = OS._version_to_codename(name, version_or_codename) + except KeyError: + pass + + try: + version = OS._codename_to_version(name, version_or_codename) + except (KeyError, RuntimeError): + pass + + if version: + codename = version_or_codename + elif codename: + version = version_or_codename + else: + raise KeyError('%s not a %s version or codename' % + (version_or_codename, name)) + return version, codename + + + @staticmethod + def _get_value(str_, name): + regex = '^%s[:=](.+)' % name + match = re.search(regex, str_, flags=re.M) + if match: + return match.groups()[0].strip(' \t"\'') + return '' + + def _set_package_type(self): + if self.name in self._deb_distros: + self.package_type = "deb" + elif self.name in self._rpm_distros: + self.package_type = "rpm" + + def to_dict(self): + return dict( + name=self.name, + version=self.version, + codename=self.codename, + ) + + def __str__(self): + return " ".join([self.name, self.version]).strip() + + def __repr__(self): + return "OS(name={name}, version={version}, codename={codename})"\ + .format(name=repr(self.name), + version=repr(self.version), + codename=repr(self.codename)) + + def __eq__(self, other): + for slot in self.__slots__: + if not getattr(self, slot) == getattr(other, slot): + return False + return True diff --git a/teuthology/orchestra/remote.py b/teuthology/orchestra/remote.py new file mode 100644 index 0000000000..df269c740b --- /dev/null +++ b/teuthology/orchestra/remote.py @@ -0,0 +1,725 @@ +""" +Support for paramiko remote objects. 
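+
+Defines RemoteShell (shell helpers that only assume a run() method) and
+Remote (an SSH-backed wrapper around a paramiko connection), plus a helper
+for obtaining a remote console.
+
+Illustrative usage sketch (the hostname below is made up):
+
+    rem = Remote('ubuntu@example-node.front.sepia.ceph.com')
+    rem.run(args=['uptime'])
+    kernel = rem.sh('uname -r').strip()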
+""" + +import teuthology.lock.query +import teuthology.lock.util +from teuthology.orchestra import run +from teuthology.orchestra import connection +from teuthology.orchestra import console +from teuthology.orchestra.opsys import OS +import teuthology.provision +from teuthology import misc +from teuthology.exceptions import CommandFailedError +from teuthology.misc import host_shortname +import errno +import time +import re +import logging +from io import BytesIO +from io import StringIO +import os +import pwd +import tempfile +import netaddr + +log = logging.getLogger(__name__) + + +class RemoteShell(object): + """ + Contains methods to run miscellaneous shell commands on remote machines. + + These methods were originally part of orchestra.remote.Remote. The reason + for moving these methods from Remote is that applications that use + teuthology for testing usually have programs that can run tests locally on + a single node machine for development work (for example, vstart_runner.py + in case of Ceph). These programs can import and reuse these methods + without having to deal SSH stuff. In short, this class serves a shared + interface. + + To use these methods, inherit the class here and implement "run()" method in + the subclass. + """ + + def remove(self, path): + self.run(args=['rm', '-fr', path]) + + def mkdtemp(self, suffix=None, parentdir=None): + """ + Create a temporary directory on remote machine and return it's path. + """ + args = ['mktemp', '-d'] + + if suffix: + args.append('--suffix=%s' % suffix) + if parentdir: + args.append('--tmpdir=%s' % parentdir) + + return self.sh(args).strip() + + def mktemp(self, suffix=None, parentdir=None, data=None): + """ + Make a remote temporary file. + + :param suffix: suffix for the temporary file + :param parentdir: parent dir where temp file should be created + :param data: write data to the file if provided + + Returns: the path of the temp file created. + """ + args = ['mktemp'] + if suffix: + args.append('--suffix=%s' % suffix) + if parentdir: + args.append('--tmpdir=%s' % parentdir) + + path = self.sh(args).strip() + + if data: + self.write_file(path=path, data=data) + + return path + + def sh(self, script, **kwargs): + """ + Shortcut for run method. + + Usage: + my_name = remote.sh('whoami') + remote_date = remote.sh('date') + """ + if 'stdout' not in kwargs: + kwargs['stdout'] = BytesIO() + if 'args' not in kwargs: + kwargs['args'] = script + proc = self.run(**kwargs) + out = proc.stdout.getvalue() + if isinstance(out, bytes): + return out.decode() + else: + return out + + def sh_file(self, script, label="script", sudo=False, **kwargs): + """ + Run shell script after copying its contents to a remote file + + :param script: string with script text, or file object + :param sudo: run command with sudo if True, + run as user name if string value (defaults to False) + :param label: string value which will be part of file name + Returns: stdout + """ + ftempl = '/tmp/teuthology-remote-$(date +%Y%m%d%H%M%S)-{}-XXXX'\ + .format(label) + script_file = self.sh("mktemp %s" % ftempl).strip() + self.sh("cat - | tee {script} ; chmod a+rx {script}"\ + .format(script=script_file), stdin=script) + if sudo: + if isinstance(sudo, str): + command="sudo -u %s %s" % (sudo, script_file) + else: + command="sudo %s" % script_file + else: + command="%s" % script_file + + return self.sh(command, **kwargs) + + def chmod(self, file_path, permissions): + """ + As super-user, set permissions on the remote file specified. 
+ """ + args = [ + 'sudo', + 'chmod', + permissions, + file_path, + ] + self.run( + args=args, + ) + + def chcon(self, file_path, context): + """ + Set the SELinux context of a given file. + + VMs and non-RPM-based hosts will skip this operation because ours + currently have SELinux disabled. + + :param file_path: The path to the file + :param context: The SELinux context to be used + """ + if self.os.package_type != 'rpm' or \ + self.os.name in ['opensuse', 'sle']: + return + if teuthology.lock.query.is_vm(self.shortname): + return + self.run(args="sudo chcon {con} {path}".format( + con=context, path=file_path)) + + def copy_file(self, src, dst, sudo=False, mode=None, owner=None, + mkdir=False, append=False): + """ + Copy data to remote file + + :param src: source file path on remote host + :param dst: destination file path on remote host + :param sudo: use sudo to write file, defaults False + :param mode: set file mode bits if provided + :param owner: set file owner if provided + :param mkdir: ensure the destination directory exists, defaults + False + :param append: append data to the file, defaults False + """ + dd = 'sudo dd' if sudo else 'dd' + args = dd + ' if=' + src + ' of=' + dst + if append: + args += ' conv=notrunc oflag=append' + if mkdir: + mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p' + dirpath = os.path.dirname(dst) + if dirpath: + args = mkdirp + ' ' + dirpath + '\n' + args + if mode: + chmod = 'sudo chmod' if sudo else 'chmod' + args += '\n' + chmod + ' ' + mode + ' ' + dst + if owner: + chown = 'sudo chown' if sudo else 'chown' + args += '\n' + chown + ' ' + owner + ' ' + dst + args = 'set -ex' + '\n' + args + self.run(args=args) + + def move_file(self, src, dst, sudo=False, mode=None, owner=None, + mkdir=False): + """ + Move data to remote file + + :param src: source file path on remote host + :param dst: destination file path on remote host + :param sudo: use sudo to write file, defaults False + :param mode: set file mode bits if provided + :param owner: set file owner if provided + :param mkdir: ensure the destination directory exists, defaults + False + """ + mv = 'sudo mv' if sudo else 'mv' + args = mv + ' ' + src + ' ' + dst + if mkdir: + mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p' + dirpath = os.path.dirname(dst) + if dirpath: + args = mkdirp + ' ' + dirpath + '\n' + args + if mode: + chmod = 'sudo chmod' if sudo else 'chmod' + args += ' && ' + chmod + ' ' + mode + ' ' + dst + if owner: + chown = 'sudo chown' if sudo else 'chown' + args += ' && ' + chown + ' ' + owner + ' ' + dst + self.run(args=args) + + def read_file(self, path, sudo=False, stdout=None, + offset=0, length=0): + """ + Read data from remote file + + :param path: file path on remote host + :param sudo: use sudo to read the file, defaults False + :param stdout: output object, defaults to io.BytesIO() + :param offset: number of bytes to skip from the file + :param length: number of bytes to read from the file + + :raises: :class:`FileNotFoundError`: there is no such file by the path + :raises: :class:`RuntimeError`: unexpected error occurred + + :returns: the file contents in bytes, if stdout is `io.BytesIO`, by + default + :returns: the file contents in str, if stdout is `io.StringIO` + """ + dd = 'sudo dd' if sudo else 'dd' + args = dd + ' if=' + path + ' of=/dev/stdout' + iflags=[] + # we have to set defaults here instead of the method's signature, + # because python is reusing the object from call to call + stdout = stdout or BytesIO() + if offset: + args += ' skip=' + str(offset) + 
iflags += 'skip_bytes' + if length: + args += ' count=' + str(length) + iflags += 'count_bytes' + if iflags: + args += ' iflag=' + ','.join(iflags) + args = 'set -ex' + '\n' + args + proc = self.run(args=args, stdout=stdout, stderr=StringIO(), + check_status=False, quiet=True) + if proc.returncode: + if 'No such file or directory' in proc.stderr.getvalue(): + raise FileNotFoundError(errno.ENOENT, + f"Cannot find file on the remote '{self.name}'", path) + else: + raise RuntimeError("Unexpected error occurred while trying to " + f"read '{path}' file on the remote '{self.name}'") + + return proc.stdout.getvalue() + + + def write_file(self, path, data, sudo=False, mode=None, owner=None, + mkdir=False, append=False): + """ + Write data to remote file + + :param path: file path on remote host + :param data: str, binary or fileobj to be written + :param sudo: use sudo to write file, defaults False + :param mode: set file mode bits if provided + :param owner: set file owner if provided + :param mkdir: preliminary create the file directory, defaults False + :param append: append data to the file, defaults False + """ + dd = 'sudo dd' if sudo else 'dd' + args = dd + ' of=' + path + if append: + args += ' conv=notrunc oflag=append' + if mkdir: + mkdirp = 'sudo mkdir -p' if sudo else 'mkdir -p' + dirpath = os.path.dirname(path) + if dirpath: + args = mkdirp + ' ' + dirpath + '\n' + args + if mode: + chmod = 'sudo chmod' if sudo else 'chmod' + args += '\n' + chmod + ' ' + mode + ' ' + path + if owner: + chown = 'sudo chown' if sudo else 'chown' + args += '\n' + chown + ' ' + owner + ' ' + path + args = 'set -ex' + '\n' + args + self.run(args=args, stdin=data, quiet=True) + + def sudo_write_file(self, path, data, **kwargs): + """ + Write data to remote file with sudo, for more info see `write_file()`. + """ + self.write_file(path, data, sudo=True, **kwargs) + + @property + def os(self): + if not hasattr(self, '_os'): + try: + os_release = self.sh('cat /etc/os-release').strip() + self._os = OS.from_os_release(os_release) + return self._os + except CommandFailedError: + pass + + lsb_release = self.sh('lsb_release -a').strip() + self._os = OS.from_lsb_release(lsb_release) + return self._os + + @property + def arch(self): + if not hasattr(self, '_arch'): + self._arch = self.sh('uname -m').strip() + return self._arch + + +class Remote(RemoteShell): + """ + A connection to a remote host. + + This is a higher-level wrapper around Paramiko's `SSHClient`. + """ + + # for unit tests to hook into + _runner = staticmethod(run.run) + _reimage_types = None + + def __init__(self, name, ssh=None, shortname=None, console=None, + host_key=None, keep_alive=True): + self.name = name + if '@' in name: + (self.user, hostname) = name.split('@') + # Temporary workaround for 'hostname --fqdn' not working on some + # machines + self._hostname = hostname + else: + # os.getlogin() doesn't work on non-login shells. 
The following + # should work on any unix system + self.user = pwd.getpwuid(os.getuid()).pw_name + hostname = name + self._shortname = shortname or host_shortname(hostname) + self._host_key = host_key + self.keep_alive = keep_alive + self._console = console + self.ssh = ssh + + if self._reimage_types is None: + Remote._reimage_types = teuthology.provision.get_reimage_types() + + def connect(self, timeout=None, create_key=None, context='connect'): + args = dict(user_at_host=self.name, host_key=self._host_key, + keep_alive=self.keep_alive, _create_key=create_key) + if context == 'reconnect': + # The reason for the 'context' workaround is not very + # clear from the technical side. + # I'll get "[Errno 98] Address already in use" altough + # there are no open tcp(ssh) connections. + # When connecting without keepalive, host_key and _create_key + # set, it will proceed. + args = dict(user_at_host=self.name, _create_key=False, host_key=None) + if timeout: + args['timeout'] = timeout + + self.ssh = connection.connect(**args) + return self.ssh + + def reconnect(self, timeout=None, socket_timeout=None, sleep_time=30): + """ + Attempts to re-establish connection. Returns True for success; False + for failure. + """ + if self.ssh is not None: + self.ssh.close() + if not timeout: + return self._reconnect(timeout=socket_timeout) + start_time = time.time() + elapsed_time = lambda: time.time() - start_time + while elapsed_time() < timeout: + success = self._reconnect(timeout=socket_timeout) + if success: + log.info(f"Successfully reconnected to host '{self.name}'") + break + # Don't let time_remaining be < 0 + time_remaining = max(0, timeout - elapsed_time()) + sleep_val = min(time_remaining, sleep_time) + time.sleep(sleep_val) + return success + + def _reconnect(self, timeout=None): + log.info(f"Trying to reconnect to host '{self.name}'") + try: + self.connect(timeout=timeout, context='reconnect') + return self.is_online + except Exception as e: + log.debug(e) + return False + + @property + def ip_address(self): + return self.ssh.get_transport().getpeername()[0] + + @property + def interface(self): + """ + The interface used by the current SSH connection + """ + if not hasattr(self, '_interface'): + self._set_iface_and_cidr() + return self._interface + + @property + def cidr(self): + """ + The network (in CIDR notation) used by the remote's SSH connection + """ + if not hasattr(self, '_cidr'): + self._set_iface_and_cidr() + return self._cidr + + def _set_iface_and_cidr(self): + ip_addr_show = self.sh('PATH=/sbin:/usr/sbin ip addr show') + regexp = 'inet.? 
%s' % self.ip_address + for line in ip_addr_show.split('\n'): + line = line.strip() + if re.match(regexp, line): + items = line.split() + self._interface = items[-1] + self._cidr = str(netaddr.IPNetwork(items[1]).cidr) + return + raise RuntimeError("Could not determine interface/CIDR!") + + @property + def hostname(self): + if not hasattr(self, '_hostname'): + self._hostname = self.sh('hostname --fqdn').strip() + return self._hostname + + @property + def machine_type(self): + if not getattr(self, '_machine_type', None): + remote_info = teuthology.lock.query.get_status(self.hostname) + if not remote_info: + return None + self._machine_type = remote_info.get("machine_type", None) + return self._machine_type + + @property + def is_reimageable(self): + return self.machine_type in self._reimage_types + + @property + def shortname(self): + if self._shortname is None: + self._shortname = host_shortname(self.hostname) + return self._shortname + + @property + def is_online(self): + if self.ssh is None: + return False + if self.ssh.get_transport() is None: + return False + try: + self.run(args="true") + except Exception: + return False + return self.ssh.get_transport().is_active() + + def ensure_online(self): + if self.is_online: + return + self.connect() + if not self.is_online: + raise Exception('unable to connect') + + @property + def system_type(self): + """ + System type decorator + """ + return misc.get_system_type(self) + + def __str__(self): + return self.name + + def __repr__(self): + return '{classname}(name={name!r})'.format( + classname=self.__class__.__name__, + name=self.name, + ) + + def run(self, **kwargs): + """ + This calls `orchestra.run.run` with our SSH client. + + TODO refactor to move run.run here? + """ + if not self.ssh or \ + not self.ssh.get_transport() or \ + not self.ssh.get_transport().is_active(): + if not self.reconnect(): + raise Exception(f'Cannot connect to remote host {self.shortname}') + r = self._runner(client=self.ssh, name=self.shortname, **kwargs) + r.remote = self + return r + + def _sftp_put_file(self, local_path, remote_path): + """ + Use the paramiko.SFTPClient to put a file. Returns the remote filename. + """ + sftp = self.ssh.open_sftp() + sftp.put(local_path, remote_path) + return + + def _sftp_get_file(self, remote_path, local_path): + """ + Use the paramiko.SFTPClient to get a file. Returns the local filename. + """ + file_size = self._format_size( + self._sftp_get_size(remote_path) + ).strip() + log.debug("{}:{} is {}".format(self.shortname, remote_path, file_size)) + sftp = self.ssh.open_sftp() + sftp.get(remote_path, local_path) + return local_path + + def _sftp_open_file(self, remote_path): + """ + Use the paramiko.SFTPClient to open a file. Returns a + paramiko.SFTPFile object. + """ + sftp = self.ssh.open_sftp() + return sftp.open(remote_path) + + def _sftp_get_size(self, remote_path): + """ + Via _sftp_open_file, return the filesize in bytes + """ + with self._sftp_open_file(remote_path) as f: + return f.stat().st_size + + @staticmethod + def _format_size(file_size): + """ + Given a file_size in bytes, returns a human-readable representation. 
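+
+        For example, 2048 is rendered as '  2KB' and 5 * 1024 ** 3 as
+        '  5GB' (the number is right-aligned to a width of three).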
+ """ + for unit in ('B', 'KB', 'MB', 'GB', 'TB'): + if abs(file_size) < 1024.0: + break + file_size = file_size / 1024.0 + return "{:3.0f}{}".format(file_size, unit) + + def put_file(self, path, dest_path, sudo=False): + """ + Copy a local filename to a remote file + """ + if sudo: + raise NotImplementedError("sudo not supported") + + self._sftp_put_file(path, dest_path) + return + + def get_file(self, path, sudo=False, dest_dir='/tmp'): + """ + Fetch a remote file, and return its local filename. + + :param sudo: Use sudo on the remote end to read a file that + requires it. Defaults to False. + :param dest_dir: Store the file in this directory. If it is /tmp, + generate a unique filename; if not, use the original + filename. + :returns: The path to the local file + """ + if not os.path.isdir(dest_dir): + raise IOError("{dir} is not a directory".format(dir=dest_dir)) + + if sudo: + orig_path = path + path = self.mktemp() + args = [ + 'sudo', + 'cp', + orig_path, + path, + ] + self.run(args=args) + self.chmod(path, '0666') + + if dest_dir == '/tmp': + # If we're storing in /tmp, generate a unique filename + (fd, local_path) = tempfile.mkstemp(dir=dest_dir) + os.close(fd) + else: + # If we are storing somewhere other than /tmp, use the original + # filename + local_path = os.path.join(dest_dir, path.split(os.path.sep)[-1]) + + self._sftp_get_file(path, local_path) + if sudo: + self.remove(path) + return local_path + + def get_tar(self, path, to_path, sudo=False): + """ + Tar a remote directory and copy it locally + """ + remote_temp_path = self.mktemp() + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'tar', + 'cz', + '-f', '-', + '-C', path, + '--', + '.', + run.Raw('>'), remote_temp_path + ]) + self.run(args=args) + if sudo: + self.chmod(remote_temp_path, '0666') + self._sftp_get_file(remote_temp_path, to_path) + self.remove(remote_temp_path) + + def get_tar_stream(self, path, sudo=False): + """ + Tar-compress a remote directory and return the RemoteProcess + for streaming + """ + args = [] + if sudo: + args.append('sudo') + args.extend([ + 'tar', + 'cz', + '-f', '-', + '-C', path, + '--', + '.', + ]) + return self.run(args=args, wait=False, stdout=run.PIPE) + + @property + def host_key(self): + if not self._host_key: + trans = self.ssh.get_transport() + key = trans.get_remote_server_key() + self._host_key = ' '.join((key.get_name(), key.get_base64())) + return self._host_key + + @property + def inventory_info(self): + node = dict() + node['name'] = self.hostname + node['user'] = self.user + node['arch'] = self.arch + node['os_type'] = self.os.name + node['os_version'] = '.'.join(self.os.version.split('.')[:2]) + node['ssh_pub_key'] = self.host_key + node['up'] = True + return node + + @property + def console(self): + if not self._console: + self._console = getRemoteConsole(self.name) + return self._console + + @property + def is_vm(self): + if not hasattr(self, '_is_vm'): + self._is_vm = teuthology.lock.query.is_vm(self.name) + return self._is_vm + + @property + def is_container(self): + if not hasattr(self, '_is_container'): + self._is_container = not bool(self.run( + args="test -f /run/.containerenv -o -f /.dockerenv", + check_status=False, + ).returncode) + return self._is_container + + @property + def init_system(self): + """ + Which init system does the remote use? 
+ + :returns: 'systemd' or None + """ + if not hasattr(self, '_init_system'): + self._init_system = None + proc = self.run( + args=['which', 'systemctl'], + check_status=False, + ) + if proc.returncode == 0: + self._init_system = 'systemd' + return self._init_system + + def __del__(self): + if self.ssh is not None: + self.ssh.close() + + +def getRemoteConsole(name, ipmiuser=None, ipmipass=None, ipmidomain=None, + logfile=None, timeout=60): + """ + Return either VirtualConsole or PhysicalConsole depending on name. + """ + if teuthology.lock.query.is_vm(name): + try: + return console.VirtualConsole(name) + except Exception: + return None + return console.PhysicalConsole( + name, ipmiuser, ipmipass, ipmidomain, logfile, timeout) diff --git a/teuthology/orchestra/run.py b/teuthology/orchestra/run.py new file mode 100644 index 0000000000..eab28ed495 --- /dev/null +++ b/teuthology/orchestra/run.py @@ -0,0 +1,534 @@ +""" +Paramiko run support +""" + +import io, re + +from paramiko import ChannelFile + +import gevent +import gevent.event +import socket +import pipes +import logging +import shutil + +from teuthology.contextutil import safe_while +from teuthology.exceptions import (CommandCrashedError, CommandFailedError, + ConnectionLostError, UnitTestError) + +log = logging.getLogger(__name__) + + +class RemoteProcess(object): + """ + An object to begin and monitor execution of a process on a remote host + """ + __slots__ = [ + 'client', 'args', 'check_status', 'command', 'hostname', + 'stdin', 'stdout', 'stderr', + '_stdin_buf', '_stdout_buf', '_stderr_buf', + 'returncode', 'exitstatus', 'timeout', + 'greenlets', + '_wait', 'logger', + # for orchestra.remote.Remote to place a backreference + 'remote', + 'label', + 'scan_tests_errors', + ] + + deadlock_warning = "Using PIPE for %s without wait=False would deadlock" + + def __init__(self, client, args, check_status=True, hostname=None, + label=None, timeout=None, wait=True, logger=None, cwd=None, scan_tests_errors=[]): + """ + Create the object. Does not initiate command execution. + + :param client: paramiko.SSHConnection to run the command with + :param args: Command to run. + :type args: String or list of strings + :param check_status: Whether to raise CommandFailedError on non-zero + exit status, and . Defaults to True. All signals + and connection loss are made to look like SIGHUP. + :param hostname: Name of remote host (optional) + :param label: Can be used to label or describe what the + command is doing. 
+ :param timeout: timeout value for arg that is passed to + exec_command of paramiko + :param wait: Whether self.wait() will be called automatically + :param logger: Alternative logger to use (optional) + :param cwd: Directory in which the command will be executed + (optional) + :param scan_tests_errors: name of unit tests for which logs need to + be scanned to look for errors (optional) + """ + self.client = client + self.args = args + if isinstance(args, list): + self.command = quote(args) + else: + self.command = args + + if cwd: + self.command = '(cd {cwd} && exec {cmd})'.format( + cwd=cwd, cmd=self.command) + + self.check_status = check_status + self.label = label + if timeout: + self.timeout = timeout + if hostname: + self.hostname = hostname + else: + (self.hostname, port) = client.get_transport().getpeername()[0:2] + + self.greenlets = [] + self.stdin, self.stdout, self.stderr = (None, None, None) + self.returncode = self.exitstatus = None + self._wait = wait + self.logger = logger or log + self.scan_tests_errors = scan_tests_errors + + def execute(self): + """ + Execute remote command + """ + for line in self.command.split('\n'): + log.getChild(self.hostname).debug('%s> %s' % (self.label or '', line)) + + if hasattr(self, 'timeout'): + (self._stdin_buf, self._stdout_buf, self._stderr_buf) = \ + self.client.exec_command(self.command, timeout=self.timeout) + else: + (self._stdin_buf, self._stdout_buf, self._stderr_buf) = \ + self.client.exec_command(self.command) + (self.stdin, self.stdout, self.stderr) = \ + (self._stdin_buf, self._stdout_buf, self._stderr_buf) + + def add_greenlet(self, greenlet): + self.greenlets.append(greenlet) + + def setup_stdin(self, stream_obj): + self.stdin = KludgeFile(wrapped=self.stdin) + if stream_obj is not PIPE: + greenlet = gevent.spawn(copy_and_close, stream_obj, self.stdin) + self.add_greenlet(greenlet) + self.stdin = None + elif self._wait: + # FIXME: Is this actually true? + raise RuntimeError(self.deadlock_warning % 'stdin') + + def setup_output_stream(self, stream_obj, stream_name, quiet=False): + if stream_obj is not PIPE: + # Log the stream + host_log = self.logger.getChild(self.hostname) + stream_log = host_log.getChild(stream_name) + self.add_greenlet( + gevent.spawn( + copy_file_to, + getattr(self, stream_name), + stream_log, + stream_obj, + quiet, + ) + ) + setattr(self, stream_name, stream_obj) + elif self._wait: + # FIXME: Is this actually true? + raise RuntimeError(self.deadlock_warning % stream_name) + + def wait(self): + """ + Block until remote process finishes. + + :returns: self.returncode + """ + + status = self._get_exitstatus() + if status != 0: + log.debug("got remote process result: {}".format(status)) + for greenlet in self.greenlets: + try: + greenlet.get(block=True,timeout=60) + except gevent.Timeout: + log.debug("timed out waiting; will kill: {}".format(greenlet)) + greenlet.kill(block=False) + for stream in ('stdout', 'stderr'): + if hasattr(self, stream): + stream_obj = getattr(self, stream) + # Despite ChannelFile having a seek() method, it raises + # "IOError: File does not support seeking." 
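+                # Only rewind real file-like capture objects (e.g. BytesIO or
+                # StringIO) so callers can read the collected output from the
+                # beginning.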
+ if hasattr(stream_obj, 'seek') and \ + not isinstance(stream_obj, ChannelFile): + stream_obj.seek(0) + + self._raise_for_status() + return status + + def _raise_for_status(self): + if self.returncode is None: + self._get_exitstatus() + if self.check_status: + if self.returncode in (None, -1): + # command either died due to a signal, or the connection + # was lost + transport = self.client.get_transport() + if transport is None or not transport.is_active(): + # look like we lost the connection + raise ConnectionLostError(command=self.command, + node=self.hostname) + + # connection seems healthy still, assuming it was a + # signal; sadly SSH does not tell us which signal + raise CommandCrashedError(command=self.command) + if self.returncode != 0: + if self.scan_tests_errors: + error = ErrorScanner(self.scan_tests_errors).scan() + if error: + raise UnitTestError( + command=self.command, exitstatus=self.returncode, + node=self.hostname, label=self.label, + message=error, + ) + raise CommandFailedError( + command=self.command, exitstatus=self.returncode, + node=self.hostname, label=self.label + ) + + def _get_exitstatus(self): + """ + :returns: the remote command's exit status (return code). Note that + if the connection is lost, or if the process was killed by a + signal, this returns None instead of paramiko's -1. + """ + status = self._stdout_buf.channel.recv_exit_status() + self.exitstatus = self.returncode = status + if status == -1: + status = None + return status + + @property + def finished(self): + gevent.wait(self.greenlets, timeout=0.1) + ready = self._stdout_buf.channel.exit_status_ready() + if ready: + self._get_exitstatus() + return ready + + def poll(self): + """ + :returns: self.returncode if the process is finished; else None + """ + if self.finished: + self._raise_for_status() + return self.returncode + return None + + def __repr__(self): + return '{classname}(client={client!r}, args={args!r}, check_status={check}, hostname={name!r})'.format( # noqa + classname=self.__class__.__name__, + client=self.client, + args=self.args, + check=self.check_status, + name=self.hostname, + ) + +class ErrorScanner(object): + """ + Scan for unit tests errors in teuthology.log + """ + __flag__ = 0 + def __init__(self, test_names=[]): + self.test_names = test_names + + def scan(self): + logfile = self.__logfile__ + if not logfile or not self.test_names: + return None + + ERROR_PATTERN = { + "nose": r"ERROR:\s", + "gtest": r"\[\s\sFAILED\s\s\]", + } + logs = [] + with open(logfile, 'r') as f: + logs = f.readlines() + + for line_number in range(len(logs) - 1, ErrorScanner.__flag__, -1): + line = logs[line_number] + for test in self.test_names: + pattern = ERROR_PATTERN[test] + error = re.search(pattern, line) + if error: + ErrorScanner.__flag__ = line_number + return line[error.start():].strip() + ErrorScanner.__flag__ = len(logs) - 1 + return None + + @property + def __logfile__(self): + loggers = logging.getLogger() + for h in loggers.handlers: + if isinstance(h, logging.FileHandler): + return h.stream.name + return None + + +class Raw(object): + + """ + Raw objects are passed to remote objects and are not processed locally. + """ + def __init__(self, value): + self.value = value + + def __repr__(self): + return '{cls}({value!r})'.format( + cls=self.__class__.__name__, + value=self.value, + ) + + def __eq__(self, value): + return self.value == value + + +def quote(args): + """ + Internal quote wrapper. + """ + def _quote(args): + """ + Handle quoted string, testing for raw charaters. 
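+
+        Raw() arguments are yielded unchanged; everything else is
+        shell-escaped with pipes.quote().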
+ """ + for a in args: + if isinstance(a, Raw): + yield a.value + else: + yield pipes.quote(a) + if isinstance(args, list): + return ' '.join(_quote(args)) + else: + return args + + +def copy_to_log(f, logger, loglevel=logging.INFO, capture=None, quiet=False): + """ + Copy line by line from file in f to the log from logger + + :param f: source stream object + :param logger: the destination logger object + :param loglevel: the level of logging data + :param capture: an optional stream object for data copy + :param quiet: suppress `logger` usage if True, this is useful only + in combination with `capture`, defaults False + """ + # Work-around for http://tracker.ceph.com/issues/8313 + if isinstance(f, ChannelFile): + f._flags += ChannelFile.FLAG_BINARY + for line in f: + if capture: + if isinstance(capture, io.StringIO): + if isinstance(line, str): + capture.write(line) + else: + capture.write(line.decode('utf-8', 'replace')) + elif isinstance(capture, io.BytesIO): + if isinstance(line, str): + capture.write(line.encode()) + else: + capture.write(line) + line = line.rstrip() + # Second part of work-around for http://tracker.ceph.com/issues/8313 + if quiet: + continue + try: + if isinstance(line, bytes): + line = line.decode('utf-8', 'replace') + logger.log(loglevel, line) + except (UnicodeDecodeError, UnicodeEncodeError): + logger.exception("Encountered unprintable line in command output") + + +def copy_and_close(src, fdst): + """ + copyfileobj call wrapper. + """ + if src is not None: + if isinstance(src, bytes): + src = io.BytesIO(src) + elif isinstance(src, str): + src = io.StringIO(src) + shutil.copyfileobj(src, fdst) + fdst.close() + + +def copy_file_to(src, logger, stream=None, quiet=False): + """ + Copy file + :param src: file to be copied. + :param logger: the logger object + :param stream: an optional file-like object which will receive + a copy of src. + :param quiet: disable logger usage if True, useful in combination + with `stream` parameter, defaults False. + """ + copy_to_log(src, logger, capture=stream, quiet=quiet) + +def spawn_asyncresult(fn, *args, **kwargs): + """ + Spawn a Greenlet and pass it's results to an AsyncResult. + + This function is useful to shuffle data from a Greenlet to + AsyncResult, which then again is useful because any Greenlets that + raise exceptions will cause tracebacks to be shown on stderr by + gevent, even when ``.link_exception`` has been called. Using an + AsyncResult avoids this. + """ + r = gevent.event.AsyncResult() + + def wrapper(): + """ + Internal wrapper. + """ + try: + value = fn(*args, **kwargs) + except Exception as e: + r.set_exception(e) + else: + r.set(value) + gevent.spawn(wrapper) + + return r + + +class Sentinel(object): + + """ + Sentinel -- used to define PIPE file-like object. + """ + def __init__(self, name): + self.name = name + + def __str__(self): + return self.name + +PIPE = Sentinel('PIPE') + + +class KludgeFile(object): + + """ + Wrap Paramiko's ChannelFile in a way that lets ``f.close()`` + actually cause an EOF for the remote command. + """ + def __init__(self, wrapped): + self._wrapped = wrapped + + def __getattr__(self, name): + return getattr(self._wrapped, name) + + def close(self): + """ + Close and shutdown. 
+ """ + self._wrapped.close() + self._wrapped.channel.shutdown_write() + + +def run( + client, args, + stdin=None, stdout=None, stderr=None, + logger=None, + check_status=True, + wait=True, + name=None, + label=None, + quiet=False, + timeout=None, + cwd=None, + scan_tests_errors=[], + # omit_sudo is used by vstart_runner.py + omit_sudo=False +): + """ + Run a command remotely. If any of 'args' contains shell metacharacters + that you want to pass unquoted, pass it as an instance of Raw(); otherwise + it will be quoted with pipes.quote() (single quote, and single quotes + enclosed in double quotes). + + :param client: SSHConnection to run the command with + :param args: command to run + :type args: list of string + :param stdin: Standard input to send; either a string, a file-like object, + None, or `PIPE`. `PIPE` means caller is responsible for + closing stdin, or command may never exit. + :param stdout: What to do with standard output. Either a file-like object, + a `logging.Logger`, `PIPE`, or `None` for copying to default + log. `PIPE` means caller is responsible for reading, or + command may never exit. + :param stderr: What to do with standard error. See `stdout`. + :param logger: If logging, write stdout/stderr to "out" and "err" children + of this logger. Defaults to logger named after this module. + :param check_status: Whether to raise CommandFailedError on non-zero exit + status, and . Defaults to True. All signals and + connection loss are made to look like SIGHUP. + :param wait: Whether to wait for process to exit. If False, returned + ``r.exitstatus`` s a `gevent.event.AsyncResult`, and the + actual status is available via ``.get()``. + :param name: Human readable name (probably hostname) of the destination + host + :param label: Can be used to label or describe what the command is doing. + :param quiet: Do not log command's stdout and stderr, defaults False. + :param timeout: timeout value for args to complete on remote channel of + paramiko + :param cwd: Directory in which the command should be executed. + :param scan_tests_errors: List of unit-tests names for which teuthology logs would + be scanned to look for errors. + """ + try: + transport = client.get_transport() + if transport: + (host, port) = transport.getpeername()[0:2] + else: + raise ConnectionLostError(command=quote(args), node=name) + except socket.error: + raise ConnectionLostError(command=quote(args), node=name) + + if name is None: + name = host + + if timeout: + log.info("Running command with timeout %d", timeout) + r = RemoteProcess(client, args, check_status=check_status, hostname=name, + label=label, timeout=timeout, wait=wait, logger=logger, + cwd=cwd, scan_tests_errors=scan_tests_errors) + r.execute() + r.setup_stdin(stdin) + r.setup_output_stream(stderr, 'stderr', quiet) + r.setup_output_stream(stdout, 'stdout', quiet) + if wait: + r.wait() + return r + + +def wait(processes, timeout=None): + """ + Wait for all given processes to exit. + + Raise if any one of them fails. + + Optionally, timeout after 'timeout' seconds. 
+ """ + if timeout: + log.info("waiting for %d", timeout) + if timeout and timeout > 0: + with safe_while(tries=(timeout // 6)) as check_time: + not_ready = list(processes) + while len(not_ready) > 0: + check_time() + for proc in list(not_ready): + if proc.finished: + not_ready.remove(proc) + + for proc in processes: + proc.wait() diff --git a/teuthology/orchestra/test/__init__.py b/teuthology/orchestra/test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output b/teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output new file mode 100644 index 0000000000..ddddf571c9 --- /dev/null +++ b/teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output @@ -0,0 +1,5 @@ +ceph 658 1 0 Jun08 ? 00:07:43 /usr/bin/ceph-mgr -f --cluster ceph --id host1 --setuser ceph --setgroup ceph +ceph 1634 1 0 Jun08 ? 00:02:17 /usr/bin/ceph-mds -f --cluster ceph --id host1 --setuser ceph --setgroup ceph +ceph 31555 1 0 Jun08 ? 01:13:50 /usr/bin/ceph-mon -f --cluster ceph --id host1 --setuser ceph --setgroup ceph +ceph 31765 1 0 Jun08 ? 00:48:42 /usr/bin/radosgw -f --cluster ceph --name client.rgw.host1.rgw0 --setuser ceph --setgroup ceph +ceph 97427 1 0 Jun17 ? 00:41:39 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph \ No newline at end of file diff --git a/teuthology/orchestra/test/integration/__init__.py b/teuthology/orchestra/test/integration/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/teuthology/orchestra/test/integration/test_integration.py b/teuthology/orchestra/test/integration/test_integration.py new file mode 100644 index 0000000000..f4ea627a28 --- /dev/null +++ b/teuthology/orchestra/test/integration/test_integration.py @@ -0,0 +1,94 @@ +from teuthology.orchestra import monkey +monkey.patch_all() + +from io import StringIO + +import os +from teuthology.orchestra import connection, remote, run +from teuthology.orchestra.test.util import assert_raises +from teuthology.exceptions import CommandCrashedError, ConnectionLostError + +from pytest import skip + +HOST = None + + +class TestIntegration(): + def setup(self): + try: + host = os.environ['ORCHESTRA_TEST_HOST'] + except KeyError: + skip('To run integration tests, set environment ' + + 'variable ORCHESTRA_TEST_HOST to user@host to use.') + global HOST + HOST = host + + def test_crash(self): + ssh = connection.connect(HOST) + e = assert_raises( + CommandCrashedError, + run.run, + client=ssh, + args=['sh', '-c', 'kill -ABRT $$'], + ) + assert e.command == "sh -c 'kill -ABRT $$'" + assert str(e) == "Command crashed: \"sh -c 'kill -ABRT $$'\"" + + def test_lost(self): + ssh = connection.connect(HOST) + e = assert_raises( + ConnectionLostError, + run.run, + client=ssh, + args=['sh', '-c', 'kill -ABRT $PPID'], + name=HOST, + ) + assert e.command == "sh -c 'kill -ABRT $PPID'" + assert str(e) == \ + "SSH connection to {host} was lost: ".format(host=HOST) + \ + "\"sh -c 'kill -ABRT $PPID'\"" + + def test_pipe(self): + ssh = connection.connect(HOST) + r = run.run( + client=ssh, + args=['cat'], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + assert r.stdout.getvalue() == '' + r.stdin.write('foo\n') + r.stdin.write('bar\n') + r.stdin.close() + + r.wait() + got = r.exitstatus + assert got == 0 + assert r.stdout.getvalue() == 'foo\nbar\n' + + def test_and(self): + ssh = connection.connect(HOST) + r = run.run( + client=ssh, + args=['true', run.Raw('&&'), 'echo', 'yup'], + stdout=StringIO(), + ) 
+ assert r.stdout.getvalue() == 'yup\n' + + def test_os(self): + rem = remote.Remote(HOST) + assert rem.os.name + assert rem.os.version + + def test_17102(self, caplog): + # http://tracker.ceph.com/issues/17102 + rem = remote.Remote(HOST) + interval = 3 + rem.run(args="echo before; sleep %s; echo after" % interval) + for record in caplog.records: + if record.msg == 'before': + before_time = record.created + elif record.msg == 'after': + after_time = record.created + assert int(round(after_time - before_time)) == interval diff --git a/teuthology/orchestra/test/log_files/test_scan_gtest.log b/teuthology/orchestra/test/log_files/test_scan_gtest.log new file mode 100644 index 0000000000..ed2504f55d --- /dev/null +++ b/teuthology/orchestra/test/log_files/test_scan_gtest.log @@ -0,0 +1,54 @@ +2021-11-26T03:42:35.308 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:35.308 INFO:tasks.workunit.client.0.smithi063.stdout:started writesame +2021-11-26T03:42:35.308 INFO:tasks.workunit.client.0.smithi063.stdout:write completion cb called! +2021-11-26T03:42:35.309 INFO:tasks.workunit.client.0.smithi063.stdout:return value is: 0 +2021-11-26T03:42:35.309 INFO:tasks.workunit.client.0.smithi063.stdout:finished writesame +2021-11-26T03:42:35.309 INFO:tasks.workunit.client.0.smithi063.stdout:to verify the data +2021-11-26T03:42:35.309 INFO:tasks.workunit.client.0.smithi063.stdout:verified +2021-11-26T03:42:35.309 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:35.310 INFO:tasks.workunit.client.0.smithi063.stdout:started writesame +2021-11-26T03:42:35.310 INFO:tasks.workunit.client.0.smithi063.stdout:expected fail, finished writesame +2021-11-26T03:42:35.310 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:35.310 INFO:tasks.workunit.client.0.smithi063.stdout:started writesame +2021-11-26T03:42:35.311 INFO:tasks.workunit.client.0.smithi063.stdout:expected fail, finished writesame +2021-11-26T03:42:35.311 INFO:tasks.workunit.client.0.smithi063.stdout:read completion cb called! +2021-11-26T03:42:35.311 INFO:tasks.workunit.client.0.smithi063.stdout:read completion cb called! +2021-11-26T03:42:35.311 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:35.312 INFO:tasks.workunit.client.0.smithi063.stdout:read completion cb called! +2021-11-26T03:42:35.312 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 4 +2021-11-26T03:42:35.312 INFO:tasks.workunit.client.0.smithi063.stdout:read: 4 +2021-11-26T03:42:35.312 INFO:tasks.workunit.client.0.smithi063.stdout:read: --42 +2021-11-26T03:42:35.313 INFO:tasks.workunit.client.0.smithi063.stdout:expected: test +2021-11-26T03:42:35.313 INFO:tasks.workunit.client.0.smithi063.stdout:/build/ceph-16.2.6-681-gfdc003bc/src/test/librbd/test_librbd.cc:364: Failure +2021-11-26T03:42:35.313 INFO:tasks.workunit.client.0.smithi063.stdout:Expected equality of these values: +2021-11-26T03:42:35.313 INFO:tasks.workunit.client.0.smithi063.stdout: 0 +2021-11-26T03:42:35.313 INFO:tasks.workunit.client.0.smithi063.stdout: memcmp(result, expected, len) +2021-11-26T03:42:35.314 INFO:tasks.workunit.client.0.smithi063.stdout: Which is: -1 +2021-11-26T03:42:35.317 INFO:tasks.workunit.client.0.smithi063.stdout:[ FAILED ] FIRST TestLibRBD.TestEncryptionLUKS2 (12236 ms) +2021-11-26T03:42:35.317 INFO:tasks.workunit.client.0.smithi063.stdout:[ RUN ] TestLibRBD.TestIOWithIOHint +2021-11-26T03:42:35.318 INFO:tasks.workunit.client.0.smithi063.stdout:using new format! 
+2021-11-26T03:42:37.857 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:37.858 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:37.858 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:37.858 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:37.858 INFO:tasks.workunit.client.0.smithi063.stdout:wrote: 512 +2021-11-26T03:42:37.859 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:37.859 INFO:tasks.workunit.client.0.smithi063.stdout:started write +2021-11-26T03:42:37.859 INFO:tasks.workunit.client.0.smithi063.stdout:write completion cb called! +2021-11-26T03:42:37.859 INFO:tasks.workunit.client.0.smithi063.stdout:return value is: 0 +2021-11-26T03:42:37.859 INFO:tasks.workunit.client.0.smithi063.stdout:finished write +2021-11-26T03:42:37.860 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:37.860 INFO:tasks.workunit.client.0.smithi063.stdout:started write +2021-11-26T03:42:37.860 INFO:tasks.workunit.client.0.smithi063.stdout:write completion cb called! +2021-11-26T03:42:37.860 INFO:tasks.workunit.client.0.smithi063.stdout:return value is: 0 +2021-11-26T03:42:37.861 INFO:tasks.workunit.client.0.smithi063.stdout:finished write +2021-11-26T03:42:37.861 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:37.861 INFO:tasks.workunit.client.0.smithi063.stdout:started write +2021-11-26T03:42:35.317 INFO:tasks.workunit.client.0.smithi063.stdout:[ FAILED ] SECOND TestLibRBD.TestEncryptionLUKS2 (12236 ms) +2021-11-26T03:42:37.861 INFO:tasks.workunit.client.0.smithi063.stdout:write completion cb called! +2021-11-26T03:42:37.862 INFO:tasks.workunit.client.0.smithi063.stdout:return value is: 0 +2021-11-26T03:42:37.862 INFO:tasks.workunit.client.0.smithi063.stdout:finished write +2021-11-26T03:42:37.862 INFO:tasks.workunit.client.0.smithi063.stdout:created completion +2021-11-26T03:42:37.862 INFO:tasks.workunit.client.0.smithi063.stdout:started write +2021-11-26T03:42:37.862 INFO:tasks.workunit.client.0.smithi063.stdout:write completion cb called! 
+2021-11-26T03:42:37.863 INFO:tasks.workunit.client.0.smithi063.stdout:return value is: 0 \ No newline at end of file diff --git a/teuthology/orchestra/test/log_files/test_scan_nose.log b/teuthology/orchestra/test/log_files/test_scan_nose.log new file mode 100644 index 0000000000..0569dd2acf --- /dev/null +++ b/teuthology/orchestra/test/log_files/test_scan_nose.log @@ -0,0 +1,49 @@ +2021-11-29T07:42:27.833 INFO:teuthology.orchestra.run.smithi053.stdout:Best match: botocore 1.23.14 +2021-11-29T07:42:27.833 INFO:teuthology.orchestra.run.smithi053.stdout:Adding botocore 1.23.14 to easy-install.pth file +2021-11-29T07:42:27.833 INFO:teuthology.orchestra.run.smithi053.stdout: +2021-11-29T07:42:27.833 INFO:teuthology.orchestra.run.smithi053.stdout:Using /home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages +2021-11-29T07:42:27.834 INFO:teuthology.orchestra.run.smithi053.stdout:Searching for urllib3==1.26.7 +2021-11-29T07:42:27.834 INFO:teuthology.orchestra.run.smithi053.stdout:Best match: urllib3 1.26.7 +2021-11-29T07:42:27.834 INFO:teuthology.orchestra.run.smithi053.stdout:Adding urllib3 1.26.7 to easy-install.pth file +2021-11-29T07:42:27.834 INFO:teuthology.orchestra.run.smithi053.stdout: +2021-11-29T07:42:27.835 INFO:teuthology.orchestra.run.smithi053.stdout:Using /home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages +2021-11-29T07:42:27.835 INFO:teuthology.orchestra.run.smithi053.stdout:Searching for python-dateutil==2.8.2 +2021-11-29T07:42:27.835 INFO:teuthology.orchestra.run.smithi053.stdout:Best match: python-dateutil 2.8.2 +2021-11-29T07:42:27.835 INFO:teuthology.orchestra.run.smithi053.stdout:Adding python-dateutil 2.8.2 to easy-install.pth file +2021-11-29T07:42:27.836 INFO:teuthology.orchestra.run.smithi053.stdout: +2021-11-29T07:42:27.836 INFO:teuthology.orchestra.run.smithi053.stdout:Using /home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages +2021-11-29T07:42:27.836 INFO:teuthology.orchestra.run.smithi053.stdout:Finished processing dependencies for s3tests==0.0.1 +2021-11-29T07:42:27.840 DEBUG:teuthology.orchestra.run.smithi053:> set -ex +2021-11-29T07:42:27.840 DEBUG:teuthology.orchestra.run.smithi053:> dd of=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf +2021-11-29T07:42:27.855 INFO:tasks.s3tests:Configuring boto... +2021-11-29T07:42:27.856 DEBUG:teuthology.orchestra.run.smithi053:> set -ex +2021-11-29T07:42:27.856 DEBUG:teuthology.orchestra.run.smithi053:> dd of=/home/ubuntu/cephtest/boto.cfg +2021-11-29T07:42:27.910 DEBUG:teuthology.orchestra.run.smithi053:s3 tests against rgw> S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt /home/ubuntu/cephtest/s3-tests/virtualenv/bin/python -m nose -w /home/ubuntu/cephtest/s3-tests -v -a test_of_sts +2021-11-29T07:42:28.691 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_get_session_token ... ok +2021-11-29T07:42:28.722 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_get_session_token_permanent_creds_denied ... ok +2021-11-29T07:42:28.852 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_assume_role_allow ... ok +2021-11-29T07:42:28.954 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_assume_role_deny ... 
ok +2021-11-29T07:57:29.138 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_assume_role_creds_expiry ... ok +2021-11-29T07:57:29.258 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_assume_role_deny_head_nonexistent ... ok +2021-11-29T07:57:29.561 INFO:teuthology.orchestra.run.smithi053.stderr:s3tests_boto3.functional.test_sts.test_assume_role_allow_head_nonexistent ... ok +2021-11-29T07:57:29.561 INFO:teuthology.orchestra.run.smithi053.stderr:ERROR +2021-11-29T07:57:29.562 INFO:teuthology.orchestra.run.smithi053.stderr: +2021-11-29T07:57:29.562 INFO:teuthology.orchestra.run.smithi053.stderr:====================================================================== +2021-11-29T07:57:29.563 INFO:teuthology.orchestra.run.smithi053.stderr:ERROR: FIRST suite for +2021-11-29T07:57:29.563 INFO:teuthology.orchestra.run.smithi053.stderr:---------------------------------------------------------------------- +2021-11-29T07:57:29.563 INFO:teuthology.orchestra.run.smithi053.stderr:Traceback (most recent call last): +2021-11-29T07:57:29.564 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages/nose/suite.py", line 229, in run +2021-11-29T07:57:29.564 INFO:teuthology.orchestra.run.smithi053.stderr: self.tearDown() +2021-11-29T07:57:29.564 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages/nose/suite.py", line 352, in tearDown +2021-11-29T07:57:29.565 INFO:teuthology.orchestra.run.smithi053.stderr: self.teardownContext(ancestor) +2021-11-29T07:57:29.565 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages/nose/suite.py", line 368, in teardownContext +2021-11-29T07:57:29.566 INFO:teuthology.orchestra.run.smithi053.stderr: try_run(context, names) +2021-11-29T07:57:29.566 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/virtualenv/lib/python3.6/site-packages/nose/util.py", line 471, in try_run +2021-11-29T07:57:29.566 INFO:teuthology.orchestra.run.smithi053.stderr: return func() +2021-11-29T07:57:29.563 INFO:teuthology.orchestra.run.smithi053.stderr:ERROR: SECOND suite for +2021-11-29T07:57:29.567 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/s3tests_boto3/functional/__init__.py", line 259, in teardown +2021-11-29T07:57:29.567 INFO:teuthology.orchestra.run.smithi053.stderr: nuke_prefixed_buckets(prefix=prefix, client=alt_client) +2021-11-29T07:57:29.568 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/s3tests_boto3/functional/__init__.py", line 148, in nuke_prefixed_buckets +2021-11-29T07:57:29.569 INFO:teuthology.orchestra.run.smithi053.stderr: buckets = get_buckets_list(client, prefix) +2021-11-29T07:57:29.569 INFO:teuthology.orchestra.run.smithi053.stderr: File "/home/ubuntu/cephtest/s3-tests/s3tests_boto3/functional/__init__.py", line 54, in get_buckets_list +2021-11-29T07:57:29.569 INFO:teuthology.orchestra.run.smithi053.stderr: response = client.list_buckets() \ No newline at end of file diff --git a/teuthology/orchestra/test/test_cluster.py b/teuthology/orchestra/test/test_cluster.py new file mode 100644 index 0000000000..ee0d07a9eb --- /dev/null +++ b/teuthology/orchestra/test/test_cluster.py @@ -0,0 +1,232 @@ +import pytest + +from mock import patch, Mock + +from teuthology.orchestra import cluster, remote, 
run + + +class TestCluster(object): + def test_init_empty(self): + c = cluster.Cluster() + assert c.remotes == {} + + def test_init(self): + r1 = Mock() + r2 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['baz']), + ], + ) + r3 = Mock() + c.add(r3, ['xyzzy', 'thud', 'foo']) + assert c.remotes == { + r1: ['foo', 'bar'], + r2: ['baz'], + r3: ['xyzzy', 'thud', 'foo'], + } + + def test_repr(self): + r1 = remote.Remote('r1', ssh=Mock()) + r2 = remote.Remote('r2', ssh=Mock()) + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['baz']), + ], + ) + assert repr(c) == \ + "Cluster(remotes=[[Remote(name='r1'), ['foo', 'bar']], " \ + "[Remote(name='r2'), ['baz']]])" + + def test_str(self): + r1 = remote.Remote('r1', ssh=Mock()) + r2 = remote.Remote('r2', ssh=Mock()) + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['baz']), + ], + ) + assert str(c) == "r1[foo,bar] r2[baz]" + + def test_run_all(self): + r1 = Mock(spec=remote.Remote) + r1.configure_mock(name='r1') + ret1 = Mock(spec=run.RemoteProcess) + r1.run.return_value = ret1 + r2 = Mock(spec=remote.Remote) + r2.configure_mock(name='r2') + ret2 = Mock(spec=run.RemoteProcess) + r2.run.return_value = ret2 + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['baz']), + ], + ) + assert r1.run.called_once_with(args=['test']) + assert r2.run.called_once_with(args=['test']) + got = c.run(args=['test']) + assert len(got) == 2 + assert got, [ret1 == ret2] + # check identity not equality + assert got[0] is ret1 + assert got[1] is ret2 + + def test_only_one(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_foo = c.only('foo') + assert c_foo.remotes == {r1: ['foo', 'bar'], r3: ['foo']} + + def test_only_two(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_both = c.only('foo', 'bar') + assert c_both.remotes, {r1: ['foo' == 'bar']} + + def test_only_none(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_none = c.only('impossible') + assert c_none.remotes == {} + + def test_only_match(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_foo = c.only('foo', lambda role: role.startswith('b')) + assert c_foo.remotes, {r1: ['foo' == 'bar']} + + def test_exclude_one(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_foo = c.exclude('foo') + assert c_foo.remotes == {r2: ['bar']} + + def test_exclude_two(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_both = c.exclude('foo', 'bar') + assert c_both.remotes == {r2: ['bar'], r3: ['foo']} + + def test_exclude_none(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), + ], + ) + c_none = c.exclude('impossible') + assert c_none.remotes == {r1: ['foo', 'bar'], r2: ['bar'], r3: ['foo']} + + def test_exclude_match(self): + r1 = Mock() + r2 = Mock() + r3 = Mock() + c = cluster.Cluster( + remotes=[ + (r1, ['foo', 'bar']), + (r2, ['bar']), + (r3, ['foo']), 
+ ], + ) + c_foo = c.exclude('foo', lambda role: role.startswith('b')) + assert c_foo.remotes == {r2: ['bar'], r3: ['foo']} + + def test_filter(self): + r1 = Mock(_name='r1') + r2 = Mock(_name='r2') + def func(r): + return r._name == "r1" + c = cluster.Cluster(remotes=[ + (r1, ['foo']), + (r2, ['bar']), + ]) + assert c.filter(func).remotes == { + r1: ['foo'] + } + + +class TestWriteFile(object): + """ Tests for cluster.write_file """ + def setup(self): + self.r1 = remote.Remote('r1', ssh=Mock()) + self.c = cluster.Cluster( + remotes=[ + (self.r1, ['foo', 'bar']), + ], + ) + + @patch("teuthology.orchestra.remote.RemoteShell.write_file") + def test_write_file(self, m_write_file): + self.c.write_file("filename", "content") + m_write_file.assert_called_with("filename", "content") + + @patch("teuthology.orchestra.remote.RemoteShell.write_file") + def test_fails_with_invalid_perms(self, m_write_file): + with pytest.raises(ValueError): + self.c.write_file("filename", "content", sudo=False, perms="invalid") + + @patch("teuthology.orchestra.remote.RemoteShell.write_file") + def test_fails_with_invalid_owner(self, m_write_file): + with pytest.raises(ValueError): + self.c.write_file("filename", "content", sudo=False, owner="invalid") + + @patch("teuthology.orchestra.remote.RemoteShell.write_file") + def test_with_sudo(self, m_write_file): + self.c.write_file("filename", "content", sudo=True) + m_write_file.assert_called_with("filename", "content", sudo=True, owner=None, mode=None) diff --git a/teuthology/orchestra/test/test_connection.py b/teuthology/orchestra/test/test_connection.py new file mode 100644 index 0000000000..487632deb6 --- /dev/null +++ b/teuthology/orchestra/test/test_connection.py @@ -0,0 +1,119 @@ +from mock import patch, Mock + +from teuthology import config +from teuthology.orchestra import connection +from teuthology.orchestra.test.util import assert_raises + + +class TestConnection(object): + def setup(self): + self.start_patchers() + + def teardown(self): + self.stop_patchers() + + def start_patchers(self): + self.patcher_sleep = patch( + 'time.sleep', + ) + self.patcher_sleep.start() + self.m_ssh = Mock() + self.patcher_ssh = patch( + 'teuthology.orchestra.connection.paramiko.SSHClient', + self.m_ssh, + ) + self.patcher_ssh.start() + + def stop_patchers(self): + self.patcher_ssh.stop() + self.patcher_sleep.stop() + + def clear_config(self): + config.config.teuthology_yaml = '' + config.config.load() + + def test_split_user_just_host(self): + got = connection.split_user('somehost.example.com') + assert got == (None, 'somehost.example.com') + + def test_split_user_both(self): + got = connection.split_user('jdoe@somehost.example.com') + assert got == ('jdoe', 'somehost.example.com') + + def test_split_user_empty_user(self): + s = '@somehost.example.com' + e = assert_raises(AssertionError, connection.split_user, s) + assert str(e) == 'Bad input to split_user: {s!r}'.format(s=s) + + def test_connect(self): + self.clear_config() + config.config.verify_host_keys = True + m_ssh_instance = self.m_ssh.return_value = Mock(); + m_transport = Mock() + m_ssh_instance.get_transport.return_value = m_transport + got = connection.connect( + 'jdoe@orchestra.test.newdream.net.invalid', + _SSHClient=self.m_ssh, + ) + self.m_ssh.assert_called_once() + m_ssh_instance.set_missing_host_key_policy.assert_called_once() + m_ssh_instance.load_system_host_keys.assert_called_once_with() + m_ssh_instance.connect.assert_called_once_with( + hostname='orchestra.test.newdream.net.invalid', + username='jdoe', + 
timeout=60, + ) + m_transport.set_keepalive.assert_called_once_with(False) + assert got is m_ssh_instance + + def test_connect_no_verify_host_keys(self): + self.clear_config() + config.config.verify_host_keys = False + m_ssh_instance = self.m_ssh.return_value = Mock(); + m_transport = Mock() + m_ssh_instance.get_transport.return_value = m_transport + got = connection.connect( + 'jdoe@orchestra.test.newdream.net.invalid', + _SSHClient=self.m_ssh, + ) + self.m_ssh.assert_called_once() + m_ssh_instance.set_missing_host_key_policy.assert_called_once() + assert not m_ssh_instance.load_system_host_keys.called + m_ssh_instance.connect.assert_called_once_with( + hostname='orchestra.test.newdream.net.invalid', + username='jdoe', + timeout=60, + ) + m_transport.set_keepalive.assert_called_once_with(False) + assert got is m_ssh_instance + + def test_connect_override_hostkeys(self): + self.clear_config() + m_ssh_instance = self.m_ssh.return_value = Mock(); + m_transport = Mock() + m_ssh_instance.get_transport.return_value = m_transport + m_host_keys = Mock() + m_ssh_instance.get_host_keys.return_value = m_host_keys + m_create_key = Mock() + m_create_key.return_value = "frobnitz" + got = connection.connect( + 'jdoe@orchestra.test.newdream.net.invalid', + host_key='ssh-rsa testkey', + _SSHClient=self.m_ssh, + _create_key=m_create_key, + ) + self.m_ssh.assert_called_once() + m_ssh_instance.get_host_keys.assert_called_once() + m_host_keys.add.assert_called_once_with( + hostname='orchestra.test.newdream.net.invalid', + keytype='ssh-rsa', + key='frobnitz', + ) + m_create_key.assert_called_once_with('ssh-rsa', 'testkey') + m_ssh_instance.connect.assert_called_once_with( + hostname='orchestra.test.newdream.net.invalid', + username='jdoe', + timeout=60, + ) + m_transport.set_keepalive.assert_called_once_with(False) + assert got is m_ssh_instance diff --git a/teuthology/orchestra/test/test_console.py b/teuthology/orchestra/test/test_console.py new file mode 100644 index 0000000000..0338a577ba --- /dev/null +++ b/teuthology/orchestra/test/test_console.py @@ -0,0 +1,217 @@ +from mock import patch + +from teuthology.config import config as teuth_config + +from teuthology.orchestra import console + + +class TestConsole(object): + pass + + +class TestPhysicalConsole(TestConsole): + klass = console.PhysicalConsole + ipmi_cmd_templ = 'ipmitool -H {h}.{d} -I lanplus -U {u} -P {p} {c}' + conserver_cmd_templ = 'console -M {m} -p {p} {mode} {h}' + + def setup(self): + self.hostname = 'host' + teuth_config.ipmi_domain = 'ipmi_domain' + teuth_config.ipmi_user = 'ipmi_user' + teuth_config.ipmi_password = 'ipmi_pass' + teuth_config.conserver_master = 'conserver_master' + teuth_config.conserver_port = 3109 + teuth_config.use_conserver = True + + def test_has_ipmi_creds(self): + cons = self.klass(self.hostname) + assert cons.has_ipmi_credentials is True + teuth_config.ipmi_domain = None + cons = self.klass(self.hostname) + assert cons.has_ipmi_credentials is False + + def test_console_command_conserver(self): + cons = self.klass( + self.hostname, + teuth_config.ipmi_user, + teuth_config.ipmi_password, + teuth_config.ipmi_domain, + ) + cons.has_conserver = True + console_cmd = cons._console_command() + assert console_cmd == self.conserver_cmd_templ.format( + m=teuth_config.conserver_master, + p=teuth_config.conserver_port, + mode='-s', + h=self.hostname, + ) + console_cmd = cons._console_command(readonly=False) + assert console_cmd == self.conserver_cmd_templ.format( + m=teuth_config.conserver_master, + 
p=teuth_config.conserver_port, + mode='-f', + h=self.hostname, + ) + + def test_console_command_ipmi(self): + teuth_config.conserver_master = None + cons = self.klass( + self.hostname, + teuth_config.ipmi_user, + teuth_config.ipmi_password, + teuth_config.ipmi_domain, + ) + sol_cmd = cons._console_command() + assert sol_cmd == self.ipmi_cmd_templ.format( + h=self.hostname, + d=teuth_config.ipmi_domain, + u=teuth_config.ipmi_user, + p=teuth_config.ipmi_password, + c='sol activate', + ) + + def test_ipmi_command_ipmi(self): + cons = self.klass( + self.hostname, + teuth_config.ipmi_user, + teuth_config.ipmi_password, + teuth_config.ipmi_domain, + ) + pc_cmd = cons._ipmi_command('power cycle') + assert pc_cmd == self.ipmi_cmd_templ.format( + h=self.hostname, + d=teuth_config.ipmi_domain, + u=teuth_config.ipmi_user, + p=teuth_config.ipmi_password, + c='power cycle', + ) + + def test_spawn_log_conserver(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + cons = self.klass(self.hostname) + assert cons.has_conserver is True + m_popen.reset_mock() + m_popen.return_value.poll.return_value = None + cons.spawn_sol_log('/fake/path') + assert m_popen.call_count == 1 + call_args = m_popen.call_args_list[0][0][0] + assert any( + [teuth_config.conserver_master in arg for arg in call_args] + ) + + def test_spawn_log_ipmi(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 1 + m_popen.return_value.wait.return_value = 1 + cons = self.klass(self.hostname) + assert cons.has_conserver is False + m_popen.reset_mock() + m_popen.return_value.poll.return_value = 1 + cons.spawn_sol_log('/fake/path') + assert m_popen.call_count == 1 + call_args = m_popen.call_args_list[0][0][0] + assert any( + ['ipmitool' in arg for arg in call_args] + ) + + def test_spawn_log_fallback(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + cons = self.klass(self.hostname) + assert cons.has_conserver is True + m_popen.reset_mock() + m_popen.return_value.poll.return_value = 1 + cons.spawn_sol_log('/fake/path') + assert cons.has_conserver is False + assert m_popen.call_count == 2 + call_args = m_popen.call_args_list[1][0][0] + assert any( + ['ipmitool' in arg for arg in call_args] + ) + + def test_get_console_conserver(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + cons = self.klass(self.hostname) + assert cons.has_conserver is True + with patch( + 'teuthology.orchestra.console.pexpect.spawn', + autospec=True, + ) as m_spawn: + cons._get_console() + assert m_spawn.call_count == 1 + assert teuth_config.conserver_master in \ + m_spawn.call_args_list[0][0][0] + + def test_get_console_ipmitool(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + cons = self.klass(self.hostname) + assert cons.has_conserver is True + with 
patch( + 'teuthology.orchestra.console.pexpect.spawn', + autospec=True, + ) as m_spawn: + cons.has_conserver = False + cons._get_console() + assert m_spawn.call_count == 1 + assert 'ipmitool' in m_spawn.call_args_list[0][0][0] + + def test_get_console_fallback(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + cons = self.klass(self.hostname) + assert cons.has_conserver is True + with patch( + 'teuthology.orchestra.console.pexpect.spawn', + autospec=True, + ) as m_spawn: + cons.has_conserver = True + m_spawn.return_value.isalive.return_value = False + cons._get_console() + assert m_spawn.return_value.isalive.call_count == 1 + assert m_spawn.call_count == 2 + assert cons.has_conserver is False + assert 'ipmitool' in m_spawn.call_args_list[1][0][0] + + def test_disable_conserver(self): + with patch( + 'teuthology.orchestra.console.psutil.subprocess.Popen', + autospec=True, + ) as m_popen: + m_popen.return_value.pid = 42 + m_popen.return_value.returncode = 0 + m_popen.return_value.wait.return_value = 0 + teuth_config.use_conserver = False + cons = self.klass(self.hostname) + assert cons.has_conserver is False diff --git a/teuthology/orchestra/test/test_opsys.py b/teuthology/orchestra/test/test_opsys.py new file mode 100644 index 0000000000..c8f6e0bd3d --- /dev/null +++ b/teuthology/orchestra/test/test_opsys.py @@ -0,0 +1,404 @@ +from textwrap import dedent +from teuthology.orchestra.opsys import OS +import pytest + + +class TestOS(object): + str_centos_7_os_release = dedent(""" + NAME="CentOS Linux" + VERSION="7 (Core)" + ID="centos" + ID_LIKE="rhel fedora" + VERSION_ID="7" + PRETTY_NAME="CentOS Linux 7 (Core)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:centos:centos:7" + HOME_URL="https://www.centos.org/" + BUG_REPORT_URL="https://bugs.centos.org/" + """) + + str_centos_7_os_release_newer = dedent(""" + NAME="CentOS Linux" + VERSION="7 (Core)" + ID="centos" + ID_LIKE="rhel fedora" + VERSION_ID="7" + PRETTY_NAME="CentOS Linux 7 (Core)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:centos:centos:7" + HOME_URL="https://www.centos.org/" + BUG_REPORT_URL="https://bugs.centos.org/" + + CENTOS_MANTISBT_PROJECT="CentOS-7" + CENTOS_MANTISBT_PROJECT_VERSION="7" + REDHAT_SUPPORT_PRODUCT="centos" + REDHAT_SUPPORT_PRODUCT_VERSION="7" + """) + + str_debian_7_lsb_release = dedent(""" + Distributor ID: Debian + Description: Debian GNU/Linux 7.1 (wheezy) + Release: 7.1 + Codename: wheezy + """) + + str_debian_7_os_release = dedent(""" + PRETTY_NAME="Debian GNU/Linux 7 (wheezy)" + NAME="Debian GNU/Linux" + VERSION_ID="7" + VERSION="7 (wheezy)" + ID=debian + ANSI_COLOR="1;31" + HOME_URL="http://www.debian.org/" + SUPPORT_URL="http://www.debian.org/support/" + BUG_REPORT_URL="http://bugs.debian.org/" + """) + + str_debian_8_os_release = dedent(""" + PRETTY_NAME="Debian GNU/Linux 8 (jessie)" + NAME="Debian GNU/Linux" + VERSION_ID="8" + VERSION="8 (jessie)" + ID=debian + HOME_URL="http://www.debian.org/" + SUPPORT_URL="http://www.debian.org/support/" + BUG_REPORT_URL="https://bugs.debian.org/" + """) + + str_debian_9_os_release = dedent(""" + PRETTY_NAME="Debian GNU/Linux 9 (stretch)" + NAME="Debian GNU/Linux" + VERSION_ID="9" + VERSION="9 (stretch)" + ID=debian + HOME_URL="https://www.debian.org/" + SUPPORT_URL="https://www.debian.org/support" + BUG_REPORT_URL="https://bugs.debian.org/" + """) + + str_ubuntu_12_04_lsb_release = dedent(""" + 
Distributor ID: Ubuntu + Description: Ubuntu 12.04.4 LTS + Release: 12.04 + Codename: precise + """) + + str_ubuntu_12_04_os_release = dedent(""" + NAME="Ubuntu" + VERSION="12.04.4 LTS, Precise Pangolin" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu precise (12.04.4 LTS)" + VERSION_ID="12.04" + """) + + str_ubuntu_14_04_os_release = dedent(""" + NAME="Ubuntu" + VERSION="14.04.4 LTS, Trusty Tahr" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu 14.04.4 LTS" + VERSION_ID="14.04" + HOME_URL="http://www.ubuntu.com/" + SUPPORT_URL="http://help.ubuntu.com/" + BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" + """) + + str_ubuntu_16_04_os_release = dedent(""" + NAME="Ubuntu" + VERSION="16.04 LTS (Xenial Xerus)" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu 16.04 LTS" + VERSION_ID="16.04" + HOME_URL="http://www.ubuntu.com/" + SUPPORT_URL="http://help.ubuntu.com/" + BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" + UBUNTU_CODENAME=xenial + """) + + str_ubuntu_18_04_os_release = dedent(""" + NAME="Ubuntu" + VERSION="18.04 LTS (Bionic Beaver)" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu 18.04 LTS" + VERSION_ID="18.04" + HOME_URL="https://www.ubuntu.com/" + SUPPORT_URL="https://help.ubuntu.com/" + BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" + PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" + VERSION_CODENAME=bionic + UBUNTU_CODENAME=bionic + """) + + str_rhel_6_4_lsb_release = dedent(""" + LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch + Distributor ID: RedHatEnterpriseServer + Description: Red Hat Enterprise Linux Server release 6.4 (Santiago) + Release: 6.4 + Codename: Santiago + """) + + str_rhel_7_lsb_release = dedent(""" + LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch + Distributor ID: RedHatEnterpriseServer + Description: Red Hat Enterprise Linux Server release 7.0 (Maipo) + Release: 7.0 + Codename: Maipo + """) + + str_rhel_7_os_release = dedent(""" + NAME="Red Hat Enterprise Linux Server" + VERSION="7.0 (Maipo)" + ID="rhel" + ID_LIKE="fedora" + VERSION_ID="7.0" + PRETTY_NAME="Red Hat Enterprise Linux Server 7.0 (Maipo)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:redhat:enterprise_linux:7.0:GA:server" + HOME_URL="https://www.redhat.com/" + BUG_REPORT_URL="https://bugzilla.redhat.com/" + + REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" + REDHAT_BUGZILLA_PRODUCT_VERSION=7.0 + REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" + REDHAT_SUPPORT_PRODUCT_VERSION=7.0 + """) + + str_fedora_26_os_release = dedent(""" + NAME=Fedora + VERSION="26 (Twenty Six)" + ID=fedora + VERSION_ID=26 + PRETTY_NAME="Fedora 26 (Twenty Six)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:fedoraproject:fedora:26" + HOME_URL="https://fedoraproject.org/" + BUG_REPORT_URL="https://bugzilla.redhat.com/" + REDHAT_BUGZILLA_PRODUCT="Fedora" + REDHAT_BUGZILLA_PRODUCT_VERSION=26 + REDHAT_SUPPORT_PRODUCT="Fedora" + REDHAT_SUPPORT_PRODUCT_VERSION=26 + PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy + """) + + str_opensuse_42_2_os_release = dedent(""" + NAME="openSUSE Leap" + VERSION="42.2" + ID=opensuse + ID_LIKE="suse" + VERSION_ID="42.2" + PRETTY_NAME="openSUSE Leap 42.2" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:42.2" + BUG_REPORT_URL="https://bugs.opensuse.org" + 
HOME_URL="https://www.opensuse.org/" + """) + + str_opensuse_42_3_os_release = dedent(""" + NAME="openSUSE Leap" + VERSION="42.3" + ID=opensuse + ID_LIKE="suse" + VERSION_ID="42.3" + PRETTY_NAME="openSUSE Leap 42.3" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:42.3" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" + """) + + str_opensuse_15_0_os_release = dedent(""" + NAME="openSUSE Leap" + VERSION="15.0" + ID="opensuse-leap" + ID_LIKE="suse opensuse" + VERSION_ID="15.0" + PRETTY_NAME="openSUSE Leap 15.0" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:15.0" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" + """) + + str_opensuse_15_1_os_release = dedent(""" + NAME="openSUSE Leap" + VERSION="15.1" + ID="opensuse-leap" + ID_LIKE="suse opensuse" + VERSION_ID="15.1" + PRETTY_NAME="openSUSE Leap 15.1" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:15.1" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" + """) + + def test_centos_7_os_release(self): + os = OS.from_os_release(self.str_centos_7_os_release) + assert os.name == 'centos' + assert os.version == '7' + assert os.codename == 'core' + assert os.package_type == 'rpm' + + def test_centos_7_os_release_newer(self): + os = OS.from_os_release(self.str_centos_7_os_release_newer) + assert os.name == 'centos' + assert os.version == '7' + assert os.codename == 'core' + assert os.package_type == 'rpm' + + def test_debian_7_lsb_release(self): + os = OS.from_lsb_release(self.str_debian_7_lsb_release) + assert os.name == 'debian' + assert os.version == '7.1' + assert os.codename == 'wheezy' + assert os.package_type == 'deb' + + def test_debian_7_os_release(self): + os = OS.from_os_release(self.str_debian_7_os_release) + assert os.name == 'debian' + assert os.version == '7' + assert os.codename == 'wheezy' + assert os.package_type == 'deb' + + def test_debian_8_os_release(self): + os = OS.from_os_release(self.str_debian_8_os_release) + assert os.name == 'debian' + assert os.version == '8' + assert os.codename == 'jessie' + assert os.package_type == 'deb' + + def test_debian_9_os_release(self): + os = OS.from_os_release(self.str_debian_9_os_release) + assert os.name == 'debian' + assert os.version == '9' + assert os.codename == 'stretch' + assert os.package_type == 'deb' + + def test_ubuntu_12_04_lsb_release(self): + os = OS.from_lsb_release(self.str_ubuntu_12_04_lsb_release) + assert os.name == 'ubuntu' + assert os.version == '12.04' + assert os.codename == 'precise' + assert os.package_type == 'deb' + + def test_ubuntu_12_04_os_release(self): + os = OS.from_os_release(self.str_ubuntu_12_04_os_release) + assert os.name == 'ubuntu' + assert os.version == '12.04' + assert os.codename == 'precise' + assert os.package_type == 'deb' + + def test_ubuntu_14_04_os_release(self): + os = OS.from_os_release(self.str_ubuntu_14_04_os_release) + assert os.name == 'ubuntu' + assert os.version == '14.04' + assert os.codename == 'trusty' + assert os.package_type == 'deb' + + def test_ubuntu_16_04_os_release(self): + os = OS.from_os_release(self.str_ubuntu_16_04_os_release) + assert os.name == 'ubuntu' + assert os.version == '16.04' + assert os.codename == 'xenial' + assert os.package_type == 'deb' + + def test_ubuntu_18_04_os_release(self): + os = OS.from_os_release(self.str_ubuntu_18_04_os_release) + assert os.name == 'ubuntu' + assert os.version == '18.04' + assert os.codename == 'bionic' + assert os.package_type == 'deb' + + def 
test_rhel_6_4_lsb_release(self): + os = OS.from_lsb_release(self.str_rhel_6_4_lsb_release) + assert os.name == 'rhel' + assert os.version == '6.4' + assert os.codename == 'santiago' + assert os.package_type == 'rpm' + + def test_rhel_7_lsb_release(self): + os = OS.from_lsb_release(self.str_rhel_7_lsb_release) + assert os.name == 'rhel' + assert os.version == '7.0' + assert os.codename == 'maipo' + assert os.package_type == 'rpm' + + def test_rhel_7_os_release(self): + os = OS.from_os_release(self.str_rhel_7_os_release) + assert os.name == 'rhel' + assert os.version == '7.0' + assert os.codename == 'maipo' + assert os.package_type == 'rpm' + + def test_fedora_26_os_release(self): + os = OS.from_os_release(self.str_fedora_26_os_release) + assert os.name == 'fedora' + assert os.version == '26' + assert os.codename == '26' + assert os.package_type == 'rpm' + + def test_opensuse_42_2_os_release(self): + os = OS.from_os_release(self.str_opensuse_42_2_os_release) + assert os.name == 'opensuse' + assert os.version == '42.2' + assert os.codename == 'leap' + assert os.package_type == 'rpm' + + def test_opensuse_42_3_os_release(self): + os = OS.from_os_release(self.str_opensuse_42_3_os_release) + assert os.name == 'opensuse' + assert os.version == '42.3' + assert os.codename == 'leap' + assert os.package_type == 'rpm' + + def test_opensuse_15_0_os_release(self): + os = OS.from_os_release(self.str_opensuse_15_0_os_release) + assert os.name == 'opensuse' + assert os.version == '15.0' + assert os.codename == 'leap' + assert os.package_type == 'rpm' + + def test_opensuse_15_1_os_release(self): + os = OS.from_os_release(self.str_opensuse_15_1_os_release) + assert os.name == 'opensuse' + assert os.version == '15.1' + assert os.codename == 'leap' + assert os.package_type == 'rpm' + + def test_version_codename_success(self): + assert OS.version_codename('ubuntu', '14.04') == ('14.04', 'trusty') + assert OS.version_codename('ubuntu', 'trusty') == ('14.04', 'trusty') + + def test_version_codename_failure(self): + with pytest.raises(KeyError) as excinfo: + OS.version_codename('ubuntu', 'frog') + assert excinfo.type == KeyError + assert 'frog' in excinfo.value.args[0] + + def test_repr(self): + os = OS(name='NAME', version='0.1.2', codename='code') + assert repr(os) == "OS(name='NAME', version='0.1.2', codename='code')" + + def test_to_dict(self): + os = OS(name='NAME', version='0.1.2', codename='code') + ref_dict = dict(name='NAME', version='0.1.2', codename='code') + assert os.to_dict() == ref_dict + + def test_version_no_codename(self): + os = OS(name='ubuntu', version='16.04') + assert os.codename == 'xenial' + + def test_codename_no_version(self): + os = OS(name='ubuntu', codename='trusty') + assert os.version == '14.04' + + def test_eq_equal(self): + os = OS(name='ubuntu', codename='trusty', version='14.04') + assert OS(name='ubuntu', codename='trusty', version='14.04') == os + + def test_eq_not_equal(self): + os = OS(name='ubuntu', codename='trusty', version='16.04') + assert OS(name='ubuntu', codename='trusty', version='14.04') != os diff --git a/teuthology/orchestra/test/test_remote.py b/teuthology/orchestra/test/test_remote.py new file mode 100644 index 0000000000..5fa5797eca --- /dev/null +++ b/teuthology/orchestra/test/test_remote.py @@ -0,0 +1,205 @@ +from mock import patch, Mock, MagicMock + +from io import BytesIO + +from teuthology.orchestra import remote +from teuthology.orchestra import opsys +from teuthology.orchestra.run import RemoteProcess + + +class TestRemote(object): + + def setup(self): 
+ self.start_patchers() + + def teardown(self): + self.stop_patchers() + + def start_patchers(self): + self.m_ssh = MagicMock() + self.patcher_ssh = patch( + 'teuthology.orchestra.connection.paramiko.SSHClient', + self.m_ssh, + ) + self.patcher_ssh.start() + + def stop_patchers(self): + self.patcher_ssh.stop() + + def test_shortname(self): + r = remote.Remote( + name='jdoe@xyzzy.example.com', + shortname='xyz', + ssh=self.m_ssh, + ) + assert r.shortname == 'xyz' + assert str(r) == 'jdoe@xyzzy.example.com' + + def test_shortname_default(self): + r = remote.Remote( + name='jdoe@xyzzy.example.com', + ssh=self.m_ssh, + ) + assert r.shortname == 'xyzzy' + assert str(r) == 'jdoe@xyzzy.example.com' + + def test_run(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + self.m_ssh.get_transport.return_value = m_transport + m_run = MagicMock() + args = [ + 'something', + 'more', + ] + proc = RemoteProcess( + client=self.m_ssh, + args=args, + ) + m_run.return_value = proc + rem = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + rem._runner = m_run + result = rem.run(args=args) + assert m_transport.getpeername.called_once_with() + assert m_run.called_once_with(args=args) + assert result is proc + assert result.remote is rem + + def test_hostname(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + self.m_ssh.get_transport.return_value = m_transport + m_run = MagicMock() + args = [ + 'hostname', + '--fqdn', + ] + stdout = BytesIO(b'test_hostname') + stdout.seek(0) + proc = RemoteProcess( + client=self.m_ssh, + args=args, + ) + proc.stdout = stdout + proc._stdout_buf = Mock() + proc._stdout_buf.channel.recv_exit_status.return_value = 0 + r = remote.Remote(name='xyzzy.example.com', ssh=self.m_ssh) + m_run.return_value = proc + r._runner = m_run + assert r.hostname == 'test_hostname' + + def test_arch(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + self.m_ssh.get_transport.return_value = m_transport + m_run = MagicMock() + args = [ + 'uname', + '-m', + ] + stdout = BytesIO(b'test_arch') + stdout.seek(0) + proc = RemoteProcess( + client=self.m_ssh, + args='fakey', + ) + proc._stdout_buf = Mock() + proc._stdout_buf.channel = Mock() + proc._stdout_buf.channel.recv_exit_status.return_value = 0 + proc._stdout_buf.channel.expects('recv_exit_status').returns(0) + proc.stdout = stdout + m_run.return_value = proc + r = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + r._runner = m_run + assert m_transport.getpeername.called_once_with() + assert proc._stdout_buf.channel.recv_exit_status.called_once_with() + assert m_run.called_once_with( + client=self.m_ssh, + args=args, + stdout=BytesIO(), + name=r.shortname, + ) + assert r.arch == 'test_arch' + + def test_host_key(self): + m_key = MagicMock() + m_key.get_name.return_value = 'key_type' + m_key.get_base64.return_value = 'test ssh key' + m_transport = MagicMock() + m_transport.get_remote_server_key.return_value = m_key + self.m_ssh.get_transport.return_value = m_transport + r = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + assert r.host_key == 'key_type test ssh key' + self.m_ssh.get_transport.assert_called_once_with() + m_transport.get_remote_server_key.assert_called_once_with() + + def test_inventory_info(self): + r = remote.Remote('user@host', host_key='host_key') + r._arch = 'arch' + r._os = opsys.OS(name='os_name', version='1.2.3', codename='code') + inv_info = r.inventory_info + assert inv_info == 
dict( + name='host', + user='user', + arch='arch', + os_type='os_name', + os_version='1.2', + ssh_pub_key='host_key', + up=True, + ) + + def test_sftp_open_file(self): + m_file_obj = MagicMock() + m_stat = Mock() + m_stat.st_size = 42 + m_file_obj.stat.return_value = m_stat + m_open = MagicMock() + m_open.return_value = m_file_obj + m_open.return_value.__enter__.return_value = m_file_obj + with patch.object(remote.Remote, '_sftp_open_file', new=m_open): + rem = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + assert rem._sftp_open_file('x') is m_file_obj + assert rem._sftp_open_file('x').stat() is m_stat + assert rem._sftp_open_file('x').stat().st_size == 42 + with rem._sftp_open_file('x') as f: + assert f == m_file_obj + + def test_sftp_get_size(self): + m_file_obj = MagicMock() + m_stat = Mock() + m_stat.st_size = 42 + m_file_obj.stat.return_value = m_stat + m_open = MagicMock() + m_open.return_value = m_file_obj + m_open.return_value.__enter__.return_value = m_file_obj + with patch.object(remote.Remote, '_sftp_open_file', new=m_open): + rem = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + assert rem._sftp_get_size('/fake/file') == 42 + + def test_format_size(self): + assert remote.Remote._format_size(1023).strip() == '1023B' + assert remote.Remote._format_size(1024).strip() == '1KB' + assert remote.Remote._format_size(1024**2).strip() == '1MB' + assert remote.Remote._format_size(1024**5).strip() == '1TB' + assert remote.Remote._format_size(1021112).strip() == '997KB' + assert remote.Remote._format_size(1021112**2).strip() == '971GB' + + def test_is_container(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + self.m_ssh.get_transport.return_value = m_transport + m_run = MagicMock() + args = [] + proc = RemoteProcess( + client=self.m_ssh, + args=args, + ) + proc.returncode = 0 + m_run.return_value = proc + rem = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + rem._runner = m_run + assert rem.is_container + proc.returncode = 1 + rem2 = remote.Remote(name='jdoe@xyzzy.example.com', ssh=self.m_ssh) + rem2._runner = m_run + assert not rem2.is_container diff --git a/teuthology/orchestra/test/test_run.py b/teuthology/orchestra/test/test_run.py new file mode 100644 index 0000000000..074d90b05d --- /dev/null +++ b/teuthology/orchestra/test/test_run.py @@ -0,0 +1,286 @@ +from io import BytesIO + +import paramiko +import socket + +from mock import MagicMock, patch +from pytest import raises + +from teuthology.orchestra import run +from teuthology.exceptions import (CommandCrashedError, CommandFailedError, + ConnectionLostError) + +def set_buffer_contents(buf, contents): + buf.seek(0) + if isinstance(contents, bytes): + buf.write(contents) + elif isinstance(contents, (list, tuple)): + buf.writelines(contents) + elif isinstance(contents, str): + buf.write(contents.encode()) + else: + raise TypeError( + "%s is a %s; should be a byte string, list or tuple" % ( + contents, type(contents) + ) + ) + buf.seek(0) + + +class TestRun(object): + def setup(self): + self.start_patchers() + + def teardown(self): + self.stop_patchers() + + def start_patchers(self): + self.m_remote_process = MagicMock(wraps=run.RemoteProcess) + self.patcher_remote_proc = patch( + 'teuthology.orchestra.run.RemoteProcess', + self.m_remote_process, + ) + self.m_channel = MagicMock(spec=paramiko.Channel)() + """ + self.m_channelfile = MagicMock(wraps=paramiko.ChannelFile) + self.m_stdin_buf = self.m_channelfile(self.m_channel()) + self.m_stdout_buf = 
self.m_channelfile(self.m_channel()) + self.m_stderr_buf = self.m_channelfile(self.m_channel()) + """ + class M_ChannelFile(BytesIO): + channel = MagicMock(spec=paramiko.Channel)() + + self.m_channelfile = M_ChannelFile + self.m_stdin_buf = self.m_channelfile() + self.m_stdout_buf = self.m_channelfile() + self.m_stderr_buf = self.m_channelfile() + self.m_ssh = MagicMock() + self.m_ssh.exec_command.return_value = ( + self.m_stdin_buf, + self.m_stdout_buf, + self.m_stderr_buf, + ) + self.m_transport = MagicMock() + self.m_transport.getpeername.return_value = ('name', 22) + self.m_ssh.get_transport.return_value = self.m_transport + self.patcher_ssh = patch( + 'teuthology.orchestra.connection.paramiko.SSHClient', + self.m_ssh, + ) + self.patcher_ssh.start() + # Tests must start this if they wish to use it + # self.patcher_remote_proc.start() + + def stop_patchers(self): + # If this patcher wasn't started, it's ok + try: + self.patcher_remote_proc.stop() + except RuntimeError: + pass + self.patcher_ssh.stop() + + def test_exitstatus(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + proc = run.run( + client=self.m_ssh, + args=['foo', 'bar baz'], + ) + assert proc.exitstatus == 0 + + def test_run_cwd(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + run.run( + client=self.m_ssh, + args=['foo_bar_baz'], + cwd='/cwd/test', + ) + self.m_ssh.exec_command.assert_called_with('(cd /cwd/test && exec foo_bar_baz)') + + def test_capture_stdout(self): + output = 'foo\nbar' + set_buffer_contents(self.m_stdout_buf, output) + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + stdout = BytesIO() + proc = run.run( + client=self.m_ssh, + args=['foo', 'bar baz'], + stdout=stdout, + ) + assert proc.stdout is stdout + assert proc.stdout.read().decode() == output + assert proc.stdout.getvalue().decode() == output + + def test_capture_stderr_newline(self): + output = 'foo\nbar\n' + set_buffer_contents(self.m_stderr_buf, output) + self.m_stderr_buf.channel.recv_exit_status.return_value = 0 + stderr = BytesIO() + proc = run.run( + client=self.m_ssh, + args=['foo', 'bar baz'], + stderr=stderr, + ) + assert proc.stderr is stderr + assert proc.stderr.read().decode() == output + assert proc.stderr.getvalue().decode() == output + + def test_status_bad(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 42 + with raises(CommandFailedError) as exc: + run.run( + client=self.m_ssh, + args=['foo'], + ) + assert str(exc.value) == "Command failed on name with status 42: 'foo'" + + def test_status_bad_nocheck(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 42 + proc = run.run( + client=self.m_ssh, + args=['foo'], + check_status=False, + ) + assert proc.exitstatus == 42 + + def test_status_crash(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = -1 + with raises(CommandCrashedError) as exc: + run.run( + client=self.m_ssh, + args=['foo'], + ) + assert str(exc.value) == "Command crashed: 'foo'" + + def test_status_crash_nocheck(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = -1 + proc = run.run( + client=self.m_ssh, + args=['foo'], + check_status=False, + ) + assert proc.exitstatus == -1 + + def test_status_lost(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + m_transport.is_active.return_value = False + self.m_stdout_buf.channel.recv_exit_status.return_value = -1 + self.m_ssh.get_transport.return_value = m_transport + with raises(ConnectionLostError) as exc: + run.run( + 
client=self.m_ssh, + args=['foo'], + ) + assert str(exc.value) == "SSH connection to name was lost: 'foo'" + + def test_status_lost_socket(self): + m_transport = MagicMock() + m_transport.getpeername.side_effect = socket.error + self.m_ssh.get_transport.return_value = m_transport + with raises(ConnectionLostError) as exc: + run.run( + client=self.m_ssh, + args=['foo'], + ) + assert str(exc.value) == "SSH connection was lost: 'foo'" + + def test_status_lost_nocheck(self): + m_transport = MagicMock() + m_transport.getpeername.return_value = ('name', 22) + m_transport.is_active.return_value = False + self.m_stdout_buf.channel.recv_exit_status.return_value = -1 + self.m_ssh.get_transport.return_value = m_transport + proc = run.run( + client=self.m_ssh, + args=['foo'], + check_status=False, + ) + assert proc.exitstatus == -1 + + def test_status_bad_nowait(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 42 + proc = run.run( + client=self.m_ssh, + args=['foo'], + wait=False, + ) + with raises(CommandFailedError) as exc: + proc.wait() + assert proc.returncode == 42 + assert str(exc.value) == "Command failed on name with status 42: 'foo'" + + def test_stdin_pipe(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + proc = run.run( + client=self.m_ssh, + args=['foo'], + stdin=run.PIPE, + wait=False + ) + assert proc.poll() == 0 + code = proc.wait() + assert code == 0 + assert proc.exitstatus == 0 + + def test_stdout_pipe(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + lines = [b'one\n', b'two', b''] + set_buffer_contents(self.m_stdout_buf, lines) + proc = run.run( + client=self.m_ssh, + args=['foo'], + stdout=run.PIPE, + wait=False + ) + assert proc.poll() == 0 + assert proc.stdout.readline() == lines[0] + assert proc.stdout.readline() == lines[1] + assert proc.stdout.readline() == lines[2] + code = proc.wait() + assert code == 0 + assert proc.exitstatus == 0 + + def test_stderr_pipe(self): + self.m_stdout_buf.channel.recv_exit_status.return_value = 0 + lines = [b'one\n', b'two', b''] + set_buffer_contents(self.m_stderr_buf, lines) + proc = run.run( + client=self.m_ssh, + args=['foo'], + stderr=run.PIPE, + wait=False + ) + assert proc.poll() == 0 + assert proc.stderr.readline() == lines[0] + assert proc.stderr.readline() == lines[1] + assert proc.stderr.readline() == lines[2] + code = proc.wait() + assert code == 0 + assert proc.exitstatus == 0 + + def test_copy_and_close(self): + run.copy_and_close(None, MagicMock()) + run.copy_and_close('', MagicMock()) + run.copy_and_close(b'', MagicMock()) + + +class TestQuote(object): + def test_quote_simple(self): + got = run.quote(['a b', ' c', 'd e ']) + assert got == "'a b' ' c' 'd e '" + + def test_quote_and_quote(self): + got = run.quote(['echo', 'this && is embedded', '&&', + 'that was standalone']) + assert got == "echo 'this && is embedded' '&&' 'that was standalone'" + + def test_quote_and_raw(self): + got = run.quote(['true', run.Raw('&&'), 'echo', 'yay']) + assert got == "true && echo yay" + + +class TestRaw(object): + def test_eq(self): + str_ = "I am a raw something or other" + raw = run.Raw(str_) + assert raw == run.Raw(str_) diff --git a/teuthology/orchestra/test/test_systemd.py b/teuthology/orchestra/test/test_systemd.py new file mode 100644 index 0000000000..c7cb3425f7 --- /dev/null +++ b/teuthology/orchestra/test/test_systemd.py @@ -0,0 +1,54 @@ +import argparse +import os + +from logging import debug +from teuthology import misc +from teuthology.orchestra import cluster +from 
teuthology.orchestra.run import quote +from teuthology.orchestra.daemon.group import DaemonGroup +import subprocess + + +class FakeRemote(object): + pass + + +def test_pid(): + ctx = argparse.Namespace() + ctx.daemons = DaemonGroup(use_systemd=True) + remote = FakeRemote() + + ps_ef_output_path = os.path.join( + os.path.dirname(__file__), + "files/daemon-systemdstate-pid-ps-ef.output" + ) + + # patching ps -ef command output using a file + def sh(args): + args[0:2] = ["cat", ps_ef_output_path] + debug(args) + return subprocess.getoutput(quote(args)) + + remote.sh = sh + remote.init_system = 'systemd' + remote.shortname = 'host1' + + ctx.cluster = cluster.Cluster( + remotes=[ + (remote, ['rgw.0', 'mon.a', 'mgr.a', 'mds.a', 'osd.0']) + ], + ) + + for remote, roles in ctx.cluster.remotes.items(): + for role in roles: + _, rol, id_ = misc.split_role(role) + if any(rol.startswith(x) for x in ['mon', 'mgr', 'mds']): + ctx.daemons.register_daemon(remote, rol, remote.shortname) + else: + ctx.daemons.register_daemon(remote, rol, id_) + + for _, daemons in ctx.daemons.daemons.items(): + for daemon in daemons.values(): + pid = daemon.pid + debug(pid) + assert pid diff --git a/teuthology/orchestra/test/util.py b/teuthology/orchestra/test/util.py new file mode 100644 index 0000000000..4aedc2ee32 --- /dev/null +++ b/teuthology/orchestra/test/util.py @@ -0,0 +1,12 @@ +def assert_raises(excClass, callableObj, *args, **kwargs): + """ + Like unittest.TestCase.assertRaises, but returns the exception. + """ + try: + callableObj(*args, **kwargs) + except excClass as e: + return e + else: + if hasattr(excClass,'__name__'): excName = excClass.__name__ + else: excName = str(excClass) + raise AssertionError("%s not raised" % excName) diff --git a/teuthology/packaging.py b/teuthology/packaging.py new file mode 100644 index 0000000000..9aece1271d --- /dev/null +++ b/teuthology/packaging.py @@ -0,0 +1,1063 @@ +import logging +import ast +import re +import requests + +from teuthology.util.compat import urljoin, urlencode + +from collections import OrderedDict +from teuthology.util.compat import PY3 +if PY3: + from io import StringIO +else: + from io import BytesIO as StringIO +from teuthology import repo_utils + +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.exceptions import (VersionNotFoundError, CommitNotFoundError, + NoRemoteError) +from teuthology.misc import sudo_write_file +from teuthology.orchestra.opsys import OS, DEFAULT_OS_VERSION +from teuthology.orchestra.run import Raw + +log = logging.getLogger(__name__) + +''' +Map 'generic' package name to 'flavor-specific' package name. +If entry is None, either the package isn't known here, or +it's known but should not be installed on remotes of this flavor +''' + +_PACKAGE_MAP = { + 'sqlite': {'deb': 'sqlite3', 'rpm': None} +} + +''' +Map 'generic' service name to 'flavor-specific' service name. 
+''' +_SERVICE_MAP = { + 'httpd': {'deb': 'apache2', 'rpm': 'httpd'} +} + + +def get_package_name(pkg, rem): + """ + Find the remote-specific name of the generic 'pkg' + """ + flavor = rem.os.package_type + + try: + return _PACKAGE_MAP[pkg][flavor] + except KeyError: + return None + + +def get_service_name(service, rem): + """ + Find the remote-specific name of the generic 'service' + """ + flavor = rem.os.package_type + try: + return _SERVICE_MAP[service][flavor] + except KeyError: + return None + + +def install_package(package, remote): + """ + Install 'package' on 'remote' + Assumes repo has already been set up (perhaps with install_repo) + """ + log.info('Installing package %s on %s', package, remote) + flavor = remote.os.package_type + if flavor == 'deb': + pkgcmd = ['DEBIAN_FRONTEND=noninteractive', + 'sudo', + '-E', + 'apt-get', + '-y', + '--force-yes', + 'install', + '{package}'.format(package=package)] + elif flavor == 'rpm': + # FIXME: zypper + pkgcmd = ['sudo', + 'yum', + '-y', + 'install', + '{package}'.format(package=package)] + else: + log.error('install_package: bad flavor ' + flavor + '\n') + return False + return remote.run(args=pkgcmd) + + +def remove_package(package, remote): + """ + Remove package from remote + """ + flavor = remote.os.package_type + if flavor == 'deb': + pkgcmd = ['DEBIAN_FRONTEND=noninteractive', + 'sudo', + '-E', + 'apt-get', + '-y', + 'purge', + '{package}'.format(package=package)] + elif flavor == 'rpm': + # FIXME: zypper + pkgcmd = ['sudo', + 'yum', + '-y', + 'erase', + '{package}'.format(package=package)] + else: + log.error('remove_package: bad flavor ' + flavor + '\n') + return False + return remote.run(args=pkgcmd) + + +def get_koji_task_result(task_id, remote, ctx): + """ + Queries kojihub and retrieves information about + the given task_id. The package, koji, must be installed + on the remote for this command to work. + + We need a remote here because koji can only be installed + on rpm based machines and teuthology runs on Ubuntu. + + The results of the given task are returned. For example: + + { + 'brootid': 3303567, + 'srpms': [], + 'rpms': [ + 'tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', + 'tasks/6745/9666745/kernel-modules-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', + ], + 'logs': [] + } + + :param task_id: The koji task_id we want to retrieve results for. + :param remote: The remote to run the koji command on. + :param ctx: The ctx from the current run, used to provide a + failure_reason and status if the koji command fails. + :returns: A python dict containing info about the task results. + """ + py_cmd = ('import koji; ' + 'hub = koji.ClientSession("{kojihub_url}"); ' + 'print(hub.getTaskResult({task_id}))') + py_cmd = py_cmd.format( + task_id=task_id, + kojihub_url=config.kojihub_url + ) + log.info("Querying kojihub for the result of task {0}".format(task_id)) + task_result = _run_python_command(py_cmd, remote, ctx) + return task_result + + +def get_koji_task_rpm_info(package, task_rpms): + """ + Extracts information about a given package from the provided + rpm results of a koji task. + + For example, if trying to retrieve the package 'kernel' from + the results of a task, the output would look like this: + + { + 'base_url': 'https://kojipkgs.fedoraproject.org/work/tasks/6745/9666745/', + 'rpm_name': 'kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm', + 'package_name': 'kernel', + 'version': '4.1.0-0.rc2.git2.1.fc23.x86_64', + } + + :param task_rpms: A list of rpms from a tasks reusults. 
+ :param package: The name of the package to retrieve. + :returns: A python dict containing info about the package. + """ + result = dict() + result['package_name'] = package + found_pkg = _find_koji_task_result(package, task_rpms) + if not found_pkg: + raise RuntimeError("The package {pkg} was not found in: {rpms}".format( + pkg=package, + rpms=task_rpms, + )) + + path, rpm_name = found_pkg.rsplit("/", 1) + result['rpm_name'] = rpm_name + result['base_url'] = "{koji_task_url}/{path}/".format( + koji_task_url=config.koji_task_url, + path=path, + ) + # removes the package name from the beginning of rpm_name + version = rpm_name.split("{0}-".format(package), 1)[1] + # removes .rpm from the rpm_name + version = version.split(".rpm")[0] + result['version'] = version + return result + + +def _find_koji_task_result(package, rpm_list): + """ + Looks in the list of rpms from koji task results to see if + the package we are looking for is present. + + Returns the full list item, including the path, if found. + + If not found, returns None. + """ + for rpm in rpm_list: + if package == _get_koji_task_result_package_name(rpm): + return rpm + return None + + +def _get_koji_task_result_package_name(path): + """ + Strips the package name from a koji rpm result. + + This makes the assumption that rpm names are in the following + format: -...rpm + + For example, given a koji rpm result might look like: + + tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm + + This method would return "kernel". + """ + filename = path.split('/')[-1] + trimmed = [] + for part in filename.split('-'): + # assumes that when the next part is not a digit + # we're past the name and at the version + if part[0].isdigit(): + return '-'.join(trimmed) + trimmed.append(part) + + return '-'.join(trimmed) + + +def get_koji_build_info(build_id, remote, ctx): + """ + Queries kojihub and retrieves information about + the given build_id. The package, koji, must be installed + on the remote for this command to work. + + We need a remote here because koji can only be installed + on rpm based machines and teuthology runs on Ubuntu. + + Here is an example of the build info returned: + + {'owner_name': 'kdreyer', 'package_name': 'ceph', + 'task_id': 8534149, 'completion_ts': 1421278726.1171, + 'creation_event_id': 10486804, 'creation_time': '2015-01-14 18:15:17.003134', + 'epoch': None, 'nvr': 'ceph-0.80.5-4.el7ost', 'name': 'ceph', + 'completion_time': '2015-01-14 18:38:46.1171', 'state': 1, 'version': '0.80.5', + 'volume_name': 'DEFAULT', 'release': '4.el7ost', 'creation_ts': 1421277317.00313, + 'package_id': 34590, 'id': 412677, 'volume_id': 0, 'owner_id': 2826 + } + + :param build_id: The koji build_id we want to retrieve info on. + :param remote: The remote to run the koji command on. + :param ctx: The ctx from the current run, used to provide a + failure_reason and status if the koji command fails. + :returns: A python dict containing info about the build. + """ + py_cmd = ('import koji; ' + 'hub = koji.ClientSession("{kojihub_url}"); ' + 'print(hub.getBuild({build_id}))') + py_cmd = py_cmd.format( + build_id=build_id, + kojihub_url=config.kojihub_url + ) + log.info('Querying kojihub for info on build {0}'.format(build_id)) + build_info = _run_python_command(py_cmd, remote, ctx) + return build_info + + +def _run_python_command(py_cmd, remote, ctx): + """ + Runs the given python code on the remote + and returns the stdout from the code as + a python object. 
+ """ + proc = remote.run( + args=[ + 'python', '-c', py_cmd + ], + stdout=StringIO(), stderr=StringIO(), check_status=False + ) + if proc.exitstatus == 0: + # returns the __repr__ of a python dict + stdout = proc.stdout.getvalue().strip() + # take the __repr__ and makes it a python dict again + result = ast.literal_eval(stdout) + else: + msg = "Error running the following on {0}: {1}".format(remote, py_cmd) + log.error(msg) + log.error("stdout: {0}".format(proc.stdout.getvalue().strip())) + log.error("stderr: {0}".format(proc.stderr.getvalue().strip())) + ctx.summary["failure_reason"] = msg + ctx.summary["status"] = "dead" + raise RuntimeError(msg) + + return result + + +def get_kojiroot_base_url(build_info, arch="x86_64"): + """ + Builds the base download url for kojiroot given the current + build information. + + :param build_info: A dict of koji build information, possibly + retrieved from get_koji_build_info. + :param arch: The arch you want to download rpms for. + :returns: The base_url to use when downloading rpms + from brew. + """ + base_url = "{kojiroot}/{package_name}/{ver}/{rel}/{arch}/".format( + kojiroot=config.kojiroot_url, + package_name=build_info["package_name"], + ver=build_info["version"], + rel=build_info["release"], + arch=arch, + ) + return base_url + + +def get_koji_package_name(package, build_info, arch="x86_64"): + """ + Builds the package name for a brew rpm. + + :param package: The name of the package + :param build_info: A dict of koji build information, possibly + retrieved from get_brew_build_info. + :param arch: The arch you want to download rpms for. + :returns: A string representing the file name for the + requested package in koji. + """ + pkg_name = "{name}-{ver}-{rel}.{arch}.rpm".format( + name=package, + ver=build_info["version"], + rel=build_info["release"], + arch=arch, + ) + + return pkg_name + + +def get_package_version(remote, package): + installed_ver = None + if remote.os.package_type == "deb": + proc = remote.run( + args=[ + 'dpkg-query', '-W', '-f', '${Version}', package + ], + stdout=StringIO(), + ) + else: + proc = remote.run( + args=[ + 'rpm', '-q', package, '--qf', '%{VERSION}-%{RELEASE}' + ], + stdout=StringIO(), + ) + if proc.exitstatus == 0: + installed_ver = proc.stdout.getvalue().strip() + # Does this look like a version string? + # this assumes a version string starts with non-alpha characters + if installed_ver and re.match('^[^a-zA-Z]', installed_ver): + log.info("The installed version of {pkg} is {ver}".format( + pkg=package, + ver=installed_ver, + )) + else: + installed_ver = None + else: + # should this throw an exception and stop the job? + log.warning( + "Unable to determine if {pkg} is installed: {stdout}".format( + pkg=package, + stdout=proc.stdout.getvalue().strip(), + ) + ) + + return installed_ver + + +def _get_config_value_for_remote(ctx, remote, config, key): + """ + Look through config, and attempt to determine the "best" value to use + for a given key. For example, given:: + + config = { + 'all': + {'branch': 'main'}, + 'branch': 'next' + } + _get_config_value_for_remote(ctx, remote, config, 'branch') + + would return 'main'. 
+ + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param config: the config dict + :param key: the name of the value to retrieve + """ + roles = ctx.cluster.remotes[remote] if ctx else None + if 'all' in config: + return config['all'].get(key) + elif roles: + for role in roles: + if role in config and key in config[role]: + return config[role].get(key) + return config.get(key) + + +def _get_response(url, wait=False, sleep=15, tries=10): + with safe_while(sleep=sleep, tries=tries, _raise=False) as proceed: + while proceed(): + resp = requests.get(url) + if resp.ok: + log.info('Package found...') + break + + if not wait: + log.info( + 'Package is not found at: %s (got HTTP code %s)...', + url, + resp.status_code, + ) + break + + log.info( + 'Package not there yet (got HTTP code %s), waiting...', + resp.status_code, + ) + + return resp + + +class GitbuilderProject(object): + """ + Represents a project that is built by gitbuilder. + """ + # gitbuilder always uses this value + rpm_release = "1-0" + + def __init__(self, project, job_config, ctx=None, remote=None): + self.project = project + self.job_config = job_config + #TODO: we could get around the need for ctx by using a list + # of roles instead, ctx is only used in _get_config_value_for_remote. + self.ctx = ctx + self.remote = remote + + if remote and ctx: + self._init_from_remote() + else: + self._init_from_config() + + self.dist_release = self._get_dist_release() + + def _init_from_remote(self): + """ + Initializes the class from a teuthology.orchestra.remote.Remote object + """ + self.arch = self.remote.arch + self.os_type = self.remote.os.name + self.os_version = self.remote.os.version + self.codename = self.remote.os.codename + self.pkg_type = self.remote.system_type + self.distro = self._get_distro( + distro=self.remote.os.name, + version=self.remote.os.version, + codename=self.remote.os.codename, + ) + # when we're initializing with a remote we most likely have + # a task config, not the entire teuthology job config + self.flavor = self.job_config.get("flavor", "default") + self.tag = self.job_config.get("tag") + + def _init_from_config(self): + """ + Initializes the class from a teuthology job config + """ + self.arch = self.job_config.get('arch', 'x86_64') + self.os_type = self.job_config.get("os_type") + self.flavor = self.job_config.get("flavor") + self.codename = self.job_config.get("codename") + self.os_version = self._get_version() + # if os_version is given, prefer version/codename derived from it + if self.os_version: + self.os_version, self.codename = \ + OS.version_codename(self.os_type, self.os_version) + self.branch = self.job_config.get("branch") + self.tag = self.job_config.get("tag") + self.ref = self.job_config.get("ref") + self.distro = self._get_distro( + distro=self.os_type, + version=self.os_version, + codename=self.codename, + ) + self.pkg_type = "deb" if self.os_type.lower() in ( + "ubuntu", + "debian", + ) else "rpm" + + if not getattr(self, 'flavor'): + # avoiding circular imports + from teuthology.suite.util import get_install_task_flavor + # when we're initializing from a full teuthology config, not just a + # task config we need to make sure we're looking at the flavor for + # the install task + self.flavor = get_install_task_flavor(self.job_config) + + @property + def sha1(self): + """ + Performs a call to gitbuilder to retrieve the sha1 if not provided in + the job_config. The returned value is cached so that this call only + happens once. 
+ + :returns: The sha1 of the project as a string. + """ + if not hasattr(self, "_sha1"): + self._sha1 = self.job_config.get('sha1') + if not self._sha1: + self._sha1 = self._get_package_sha1() + return self._sha1 + + @property + def version(self): + """ + Performs a call to gitubilder to retrieve the version number for the + project. The returned value is cached so that this call only happens + once. + + :returns: The version number of the project as a string. + """ + if not hasattr(self, '_version'): + self._version = self._get_package_version() + return self._version + + @property + def base_url(self): + """ + The base url that points at this project on gitbuilder. + + :returns: A string of the base url for this project + """ + return self._get_base_url() + + @property + def uri_reference(self): + """ + The URI reference that identifies what build of the project + we'd like to use. + + For example, the following could be returned:: + + ref/ + sha1/ + ref/ + + :returns: The uri_reference as a string. + """ + return self._get_uri_reference() + + def _get_dist_release(self): + version = self._parse_version(self.os_version) + if self.os_type in ('centos', 'rhel'): + return "el{0}".format(version) + elif self.os_type == "fedora": + return "fc{0}".format(version) + else: + # debian and ubuntu just use the distro name + return self.os_type + + @staticmethod + def _parse_version(version): + """ + Parses a distro version string and returns a modified string + that matches the format needed for the gitbuilder url. + + Minor version numbers are ignored. + """ + return version.split(".")[0] + + @classmethod + def _get_distro(cls, distro=None, version=None, codename=None): + """ + Given a distro and a version, returned the combined string + to use in a gitbuilder url. + + :param distro: The distro as a string + :param version: The version as a string + :param codename: The codename for the distro. + Used for deb based distros. + """ + if distro in ('centos', 'rhel'): + distro = "centos" + elif distro == "fedora": + distro = "fedora" + elif distro == "opensuse": + distro = "opensuse" + elif distro == "sle": + distro == "sle" + else: + # deb based systems use codename instead of a distro/version combo + if not codename: + # lookup codename based on distro string + codename = OS._version_to_codename(distro, version) + if not codename: + msg = "No codename found for: {distro} {version}".format( + distro=distro, + version=version, + ) + log.exception(msg) + raise RuntimeError() + return codename + + return "{distro}{version}".format( + distro=distro, + version=cls._parse_version(version), + ) + + def _get_version(self): + """ + Attempts to find the distro version from the job_config. + + If not found, it will return the default version for + the distro found in job_config. + + :returns: A string distro version + """ + version = self.job_config.get("os_version") + if not version: + version = DEFAULT_OS_VERSION.get(self.os_type) + + return str(version) + + def _get_uri_reference(self): + """ + Returns the URI reference that identifies what build of the project + we'd like to use. + + If a remote is given, it will attempt to read the config for the given + remote to find either a tag, branch or sha1 defined. If there is no + remote, the sha1 from the config will be used. + + If a tag, branch or sha1 can't be found it will default to use the + build from the main branch. + + :returns: A string URI. 
Ex: ref/main + """ + ref_name, ref_val = next(iter(self._choose_reference().items())) + if ref_name == 'sha1': + return 'sha1/%s' % ref_val + else: + return 'ref/%s' % ref_val + + def _choose_reference(self): + """ + Since it's only meaningful to search for one of: + ref, tag, branch, sha1 + Decide which to use. + + :returns: a single-key dict containing the name and value of the + reference to use, e.g. {'branch': 'main'} + """ + tag = branch = sha1 = None + if self.remote: + tag = _get_config_value_for_remote(self.ctx, self.remote, + self.job_config, 'tag') + branch = _get_config_value_for_remote(self.ctx, self.remote, + self.job_config, 'branch') + sha1 = _get_config_value_for_remote(self.ctx, self.remote, + self.job_config, 'sha1') + ref = None + else: + ref = self.ref + tag = self.tag + branch = self.branch + sha1 = self.sha1 + + def warn(attrname): + names = ('ref', 'tag', 'branch', 'sha1') + vars = (ref, tag, branch, sha1) + # filter(None,) filters for truth + if sum(1 for _ in vars if _) > 1: + log.warning( + "More than one of ref, tag, branch, or sha1 supplied; " + "using %s", + attrname + ) + for n, v in zip(names, vars): + log.info('%s: %s' % (n, v)) + + if ref: + warn('ref') + return dict(ref=ref) + elif tag: + warn('tag') + return dict(tag=tag) + elif branch: + warn('branch') + return dict(branch=branch) + elif sha1: + warn('sha1') + return dict(sha1=sha1) + else: + log.warning("defaulting to main branch") + return dict(branch='main') + + def _get_base_url(self): + """ + Figures out which package repo base URL to use. + """ + template = config.baseurl_template + # get distro name and arch + base_url = template.format( + host=config.gitbuilder_host, + proj=self.project, + pkg_type=self.pkg_type, + arch=self.arch, + dist=self.distro, + flavor=self.flavor, + uri=self.uri_reference, + ) + return base_url + + def _get_package_version(self): + """ + Look for, and parse, a file called 'version' in base_url. + """ + url = "{0}/version".format(self.base_url) + log.info("Looking for package version: {0}".format(url)) + # will loop and retry until a 200 is returned or the retry + # limits are reached + resp = _get_response(url, wait=self.job_config.get("wait_for_package", False)) + + if not resp.ok: + raise VersionNotFoundError(url) + version = resp.text.strip().lstrip('v') + log.info("Found version: {0}".format(version)) + return version + + def _get_package_sha1(self): + """ + Look for, and parse, a file called 'sha1' in base_url. + """ + url = "{0}/sha1".format(self.base_url) + log.info("Looking for package sha1: {0}".format(url)) + resp = requests.get(url) + sha1 = None + if not resp.ok: + # TODO: maybe we should have this retry a few times? + log.error( + 'Package sha1 was not there (got HTTP code %s)...', + resp.status_code, + ) + else: + sha1 = resp.text.strip() + log.info("Found sha1: {0}".format(sha1)) + + return sha1 + + def install_repo(self): + """ + Install the .repo file or sources.list fragment on self.remote if there + is one. 
If not, raises an exception + """ + if not self.remote: + raise NoRemoteError() + if self.remote.os.package_type == 'rpm': + self._install_rpm_repo() + elif self.remote.os.package_type == 'deb': + self._install_deb_repo() + + def _install_rpm_repo(self): + dist_release = self.dist_release + project = self.project + proj_release = \ + '{proj}-release-{release}.{dist_release}.noarch'.format( + proj=project, release=self.rpm_release, + dist_release=dist_release + ) + rpm_name = "{rpm_nm}.rpm".format(rpm_nm=proj_release) + url = "{base_url}/noarch/{rpm_name}".format( + base_url=self.base_url, rpm_name=rpm_name) + if dist_release in ['opensuse', 'sle']: + url = "{base_url}/{arch}".format( + base_url=self.base_url, arch=self.arch) + self.remote.run(args=[ + 'sudo', 'zypper', '-n', 'addrepo', '--refresh', '--no-gpgcheck', + '-p', '1', url, 'ceph-rpm-under-test', + ]) + else: + self.remote.run(args=['sudo', 'yum', '-y', 'install', url]) + + def _install_deb_repo(self): + self.remote.run( + args=[ + 'echo', 'deb', self.base_url, self.codename, 'main', + Raw('|'), + 'sudo', 'tee', + '/etc/apt/sources.list.d/{proj}.list'.format( + proj=self.project), + ], + stdout=StringIO(), + ) + + def remove_repo(self): + """ + Remove the .repo file or sources.list fragment on self.remote if there + is one. If not, raises an exception + """ + if not self.remote: + raise NoRemoteError() + if self.remote.os.package_type == 'rpm': + self._remove_rpm_repo() + elif self.remote.os.package_type == 'deb': + self._remove_deb_repo() + + def _remove_rpm_repo(self): + if self.dist_release in ['opensuse', 'sle']: + self.remote.run(args=[ + 'sudo', 'zypper', '-n', 'removerepo', 'ceph-rpm-under-test' + ]) + else: + remove_package('%s-release' % self.project, self.remote) + + def _remove_deb_repo(self): + self.remote.run( + args=[ + 'sudo', + 'rm', '-f', + '/etc/apt/sources.list.d/{proj}.list'.format( + proj=self.project), + ] + ) + + +class ShamanProject(GitbuilderProject): + def __init__(self, project, job_config, ctx=None, remote=None): + super(ShamanProject, self).__init__(project, job_config, ctx, remote) + self.query_url = 'https://%s/api/' % config.shaman_host + + # Force to use the "noarch" instead to build the uri. + self.force_noarch = self.job_config.get("shaman", {}).get("force_noarch", False) + + def _get_base_url(self): + self.assert_result() + return self._result.json()[0]['url'] + + @property + def _result(self): + if getattr(self, '_result_obj', None) is None: + self._result_obj = self._search() + return self._result_obj + + def _search(self): + uri = self._search_uri + log.debug("Querying %s", uri) + resp = requests.get( + uri, + headers={'content-type': 'application/json'}, + ) + resp.raise_for_status() + return resp + + @property + def _search_uri(self): + flavor = self.flavor + req_obj = OrderedDict() + req_obj['status'] = 'ready' + req_obj['project'] = self.project + req_obj['flavor'] = flavor + arch = "noarch" if self.force_noarch else self.arch + req_obj['distros'] = '%s/%s' % (self.distro, arch) + ref_name, ref_val = list(self._choose_reference().items())[0] + if ref_name == 'tag': + req_obj['sha1'] = self._sha1 = self._tag_to_sha1() + elif ref_name == 'sha1': + req_obj['sha1'] = ref_val + else: + req_obj['ref'] = ref_val + req_str = urlencode(req_obj) + uri = urljoin( + self.query_url, + 'search', + ) + '?%s' % req_str + return uri + + def _tag_to_sha1(self): + """ + Shaman doesn't know about tags. Use git ls-remote to query the remote + repo in order to map tags to their sha1 value. 
+ + This method will also retry against ceph.git if the original request + uses ceph-ci.git and fails. + """ + def get_sha1(url): + # Ceph (and other projects) uses annotated tags for releases. This + # has the side-effect of making git ls-remote return the sha1 for + # the annotated tag object and not the last "real" commit in that + # tag. By contrast, when a person (or a build system) issues a + # "git checkout <tag>" command, HEAD will be the last "real" commit + # and not the tag. + # Below we have to append "^{}" to the tag value to work around + # this in order to query for the sha1 that the build system uses. + return repo_utils.ls_remote(url, "%s^{}" % self.tag) + + git_url = repo_utils.build_git_url(self.project) + result = get_sha1(git_url) + # For upgrade tests that are otherwise using ceph-ci.git, we need to + # also look in ceph.git to lookup released tags. + if result is None and 'ceph-ci' in git_url: + alt_git_url = git_url.replace('ceph-ci', 'ceph') + log.info( + "Tag '%s' not found in %s; will also look in %s", + self.tag, + git_url, + alt_git_url, + ) + result = get_sha1(alt_git_url) + + if result is None: + raise CommitNotFoundError(self.tag, git_url) + return result + + def assert_result(self): + if len(self._result.json()) == 0: + raise VersionNotFoundError(self._result.url) + + @classmethod + def _get_distro(cls, distro=None, version=None, codename=None): + if distro in ('centos', 'rhel'): + distro = 'centos' + version = cls._parse_version(version) + return "%s/%s" % (distro, version) + + def _get_package_sha1(self): + # This doesn't raise because GitbuilderProject._get_package_sha1() + # doesn't either. + if not len(self._result.json()): + log.error("sha1 not found: %s", self._result.url) + else: + return self._result.json()[0]['sha1'] + + def _get_package_version(self): + self.assert_result() + return self._result.json()[0]['extra']['package_manager_version'] + + @property + def scm_version(self): + self.assert_result() + return self._result.json()[0]['extra']['version'] + + @property + def repo_url(self): + self.assert_result() + return urljoin( + self._result.json()[0]['chacra_url'], + 'repo', + ) + + @property + def build_complete(self): + # use the repo search results to get a ref and a sha1; the + # input to teuthology-suite doesn't contain both + try: + self.assert_result() + except VersionNotFoundError: + return False + + # self._result has status, project, flavor, distros, arch, and sha1 + # restrictions, so the only reason for multiples should be "multiple + # builds of the same sha1 etc."; the first entry is the newest + search_result = self._result.json()[0] + + # now look for the build complete status + path = '/'.join( + ('builds/ceph', search_result['ref'], search_result['sha1']) + ) + build_url = urljoin(self.query_url, path) + + try: + resp = requests.get(build_url) + resp.raise_for_status() + except requests.HTTPError: + return False + log.debug(f'looking for {self.distro} {self.arch} {self.flavor}') + for build in resp.json(): + log.debug(f'build: {build["distro"]}/{build["distro_version"]} {build["distro_arch"]} {build["flavor"]}') + if ( + # we must compare build arch to self.arch, since shaman's + # results can have multiple arches but we're searching + # for precisely one here + build['distro'] == search_result['distro'] and + build['distro_version'] == search_result['distro_version'] and + build['flavor'] == search_result['flavor'] and + build['distro_arch'] == self.arch + ): + return build['status'] == 'completed' + return False + + def 
_get_repo(self): + resp = requests.get(self.repo_url) + resp.raise_for_status() + return str(resp.text) + + def _install_rpm_repo(self): + dist_release = self.dist_release + repo = self._get_repo() + if dist_release in ['opensuse', 'sle']: + log.info("Writing zypper repo:\n{}".format(repo)) + sudo_write_file( + self.remote, + '/etc/zypp/repos.d/{proj}.repo'.format(proj=self.project), + repo, + ) + else: + log.info("Writing yum repo:\n{}".format(repo)) + sudo_write_file( + self.remote, + '/etc/yum.repos.d/{proj}.repo'.format(proj=self.project), + repo, + ) + + def _install_deb_repo(self): + repo = self._get_repo() + sudo_write_file( + self.remote, + '/etc/apt/sources.list.d/{proj}.list'.format( + proj=self.project), + repo, + ) + + def _remove_rpm_repo(self): + # FIXME: zypper + self.remote.run( + args=[ + 'sudo', + 'rm', '-f', + '/etc/yum.repos.d/{proj}.repo'.format(proj=self.project), + ] + ) + + +def get_builder_project(): + """ + Depending on whether config.use_shaman is True or False, return + GitbuilderProject or ShamanProject (the class, not an instance). + """ + if config.use_shaman is True: + builder_class = ShamanProject + else: + builder_class = GitbuilderProject + return builder_class diff --git a/teuthology/parallel.py b/teuthology/parallel.py new file mode 100644 index 0000000000..0a7d3ab35a --- /dev/null +++ b/teuthology/parallel.py @@ -0,0 +1,115 @@ +import logging +import sys + +import gevent +import gevent.pool +import gevent.queue + + +log = logging.getLogger(__name__) + + +class ExceptionHolder(object): + def __init__(self, exc_info): + self.exc_info = exc_info + + +def capture_traceback(func, *args, **kwargs): + """ + Utility function to capture tracebacks of any exception func + raises. + """ + try: + return func(*args, **kwargs) + except Exception: + return ExceptionHolder(sys.exc_info()) + + +def resurrect_traceback(exc): + if isinstance(exc, ExceptionHolder): + raise exc.exc_info[1] + elif isinstance(exc, BaseException): + raise exc + else: + return + + +class parallel(object): + """ + This class is a context manager for running functions in parallel. + + You add functions to be run with the spawn method:: + + with parallel() as p: + for foo in bar: + p.spawn(quux, foo, baz=True) + + You can iterate over the results (which are in arbitrary order):: + + with parallel() as p: + for foo in bar: + p.spawn(quux, foo, baz=True) + for result in p: + print(result) + + If one of the spawned functions throws an exception, it will be thrown + when iterating over the results, or when the with block ends. + + At the end of the with block, the main thread waits until all + spawned functions have completed, or, if one exited with an exception, + kills the rest and raises the exception. 
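+ + A short sketch of that failure path (``boom`` is a hypothetical function, not part of this module):: + + def boom(): + raise RuntimeError('fail') + + with parallel() as p: + p.spawn(boom) + # the RuntimeError raised by boom() is re-raised here, once the + # with block finishes collecting results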
+ """ + + def __init__(self): + self.group = gevent.pool.Group() + self.results = gevent.queue.Queue() + self.count = 0 + self.any_spawned = False + self.iteration_stopped = False + + def spawn(self, func, *args, **kwargs): + self.count += 1 + self.any_spawned = True + greenlet = self.group.spawn(capture_traceback, func, *args, **kwargs) + greenlet.link(self._finish) + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + if value is not None: + return False + + # raises if any greenlets exited with an exception + for result in self: + log.debug('result is %s', repr(result)) + + return True + + def __iter__(self): + return self + + def __next__(self): + if not self.any_spawned or self.iteration_stopped: + raise StopIteration() + result = self.results.get() + + try: + resurrect_traceback(result) + except StopIteration: + self.iteration_stopped = True + raise + + return result + + next = __next__ + + def _finish(self, greenlet): + if greenlet.successful(): + self.results.put(greenlet.value) + else: + self.results.put(greenlet.exception) + + self.count -= 1 + if self.count <= 0: + self.results.put(StopIteration()) diff --git a/teuthology/provision/__init__.py b/teuthology/provision/__init__.py new file mode 100644 index 0000000000..325f2c34bf --- /dev/null +++ b/teuthology/provision/__init__.py @@ -0,0 +1,121 @@ +import logging + +import teuthology.lock.query +from teuthology.misc import decanonicalize_hostname, get_distro, get_distro_version + +from teuthology.provision import cloud +from teuthology.provision import downburst +from teuthology.provision import fog +from teuthology.provision import openstack +from teuthology.provision import pelagos +import os + +log = logging.getLogger(__name__) + + +def _logfile(ctx, shortname): + if hasattr(ctx, 'config') and ctx.config.get('archive_path'): + return os.path.join(ctx.config['archive_path'], + shortname + '.downburst.log') + +def get_reimage_types(): + return pelagos.get_types() + fog.get_types() + +def reimage(ctx, machine_name, machine_type): + os_type = get_distro(ctx) + os_version = get_distro_version(ctx) + + pelagos_types = pelagos.get_types() + fog_types = fog.get_types() + if machine_type in pelagos_types and machine_type in fog_types: + raise Exception('machine_type can be used with one provisioner only') + elif machine_type in pelagos_types: + obj = pelagos.Pelagos(machine_name, os_type, os_version) + elif machine_type in fog_types: + obj = fog.FOG(machine_name, os_type, os_version) + else: + raise Exception("The machine_type '%s' is not known to any " + "of configured provisioners" % machine_type) + return obj.create() + + +def create_if_vm(ctx, machine_name, _downburst=None): + """ + Use downburst to create a virtual machine + + :param _downburst: Only used for unit testing. + """ + if _downburst: + status_info = _downburst.status + else: + status_info = teuthology.lock.query.get_status(machine_name) + shortname = decanonicalize_hostname(machine_name) + machine_type = status_info['machine_type'] + os_type = get_distro(ctx) + os_version = get_distro_version(ctx) + if not teuthology.lock.query.is_vm(status=status_info): + return False + + if machine_type in cloud.get_types(): + return cloud.get_provisioner( + machine_type, + shortname, + os_type, + os_version, + conf=getattr(ctx, 'config', dict()), + ).create() + + has_config = hasattr(ctx, 'config') and ctx.config is not None + if has_config and 'downburst' in ctx.config: + log.warning( + 'Usage of a custom downburst config has been deprecated.' 
+ ) + + dbrst = _downburst or \ + downburst.Downburst(name=machine_name, os_type=os_type, + os_version=os_version, status=status_info, + logfile=_logfile(ctx, shortname)) + return dbrst.create() + + +def destroy_if_vm(ctx, machine_name, user=None, description=None, + _downburst=None): + """ + Use downburst to destroy a virtual machine + + Return False only on vm downburst failures. + + :param _downburst: Only used for unit testing. + """ + if _downburst: + status_info = _downburst.status + else: + status_info = teuthology.lock.query.get_status(machine_name) + if not status_info or not teuthology.lock.query.is_vm(status=status_info): + return True + if user is not None and user != status_info['locked_by']: + msg = "Tried to destroy {node} as {as_user} but it is locked " + \ + "by {locked_by}" + log.error(msg.format(node=machine_name, as_user=user, + locked_by=status_info['locked_by'])) + return False + if (description is not None and description != + status_info['description']): + msg = "Tried to destroy {node} with description {desc_arg} " + \ + "but it is locked with description {desc_lock}" + log.error(msg.format(node=machine_name, desc_arg=description, + desc_lock=status_info['description'])) + return False + machine_type = status_info.get('machine_type') + shortname = decanonicalize_hostname(machine_name) + if machine_type == 'openstack': + return openstack.ProvisionOpenStack().destroy(shortname) + elif machine_type in cloud.get_types(): + return cloud.get_provisioner( + machine_type, shortname, None, None).destroy() + + dbrst = _downburst or \ + downburst.Downburst(name=machine_name, os_type=None, + os_version=None, status=status_info, + logfile=_logfile(ctx, shortname)) + return dbrst.destroy() diff --git a/teuthology/provision/cloud/__init__.py b/teuthology/provision/cloud/__init__.py new file mode 100644 index 0000000000..d30ad3338c --- /dev/null +++ b/teuthology/provision/cloud/__init__.py @@ -0,0 +1,49 @@ +import logging + +from teuthology.config import config + +from teuthology.provision.cloud import openstack + +log = logging.getLogger(__name__) + + +supported_drivers = dict( + openstack=dict( + provider=openstack.OpenStackProvider, + provisioner=openstack.OpenStackProvisioner, + ), +) + + +def get_types(): + types = list() + if 'libcloud' in config and 'providers' in config.libcloud: + types = list(config.libcloud['providers'].keys()) + return types + + +def get_provider_conf(node_type): + all_providers = config.libcloud['providers'] + provider_conf = all_providers[node_type] + return provider_conf + + +def get_provider(node_type): + provider_conf = get_provider_conf(node_type) + driver = provider_conf['driver'] + provider_cls = supported_drivers[driver]['provider'] + return provider_cls(name=node_type, conf=provider_conf) + + +def get_provisioner(node_type, name, os_type, os_version, conf=None): + provider = get_provider(node_type) + provider_conf = get_provider_conf(node_type) + driver = provider_conf['driver'] + provisioner_cls = supported_drivers[driver]['provisioner'] + return provisioner_cls( + provider=provider, + name=name, + os_type=os_type, + os_version=os_version, + conf=conf, + ) diff --git a/teuthology/provision/cloud/base.py b/teuthology/provision/cloud/base.py new file mode 100644 index 0000000000..1700fa9ed4 --- /dev/null +++ b/teuthology/provision/cloud/base.py @@ -0,0 +1,89 @@ +import logging +from copy import deepcopy + +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider as lc_Provider + +import 
teuthology.orchestra.remote +import teuthology.provision.cloud +from teuthology.misc import canonicalize_hostname, decanonicalize_hostname + +log = logging.getLogger(__name__) + + +class Provider(object): + _driver_posargs = list() + + def __init__(self, name, conf): + self.name = name + self.conf = conf + self.driver_name = self.conf['driver'] + + def _get_driver(self): + driver_type = get_driver( + getattr(lc_Provider, self.driver_name.upper()) + ) + driver_args = self._get_driver_args() + driver = driver_type( + *[driver_args.pop(arg_name) for arg_name in self._driver_posargs], + **driver_args + ) + return driver + driver = property(fget=_get_driver) + + def _get_driver_args(self): + return deepcopy(self.conf['driver_args']) + + +class Provisioner(object): + def __init__( + self, provider, name, os_type=None, os_version=None, + conf=None, user='ubuntu', + ): + if isinstance(provider, str): + provider = teuthology.provision.cloud.get_provider(provider) + self.provider = provider + self.name = decanonicalize_hostname(name) + self.hostname = canonicalize_hostname(name, user=None) + self.os_type = os_type + self.os_version = os_version + self.user = user + + def create(self): + try: + return self._create() + except Exception: + log.exception("Failed to create %s", self.name) + return False + + def _create(self): + pass + + def destroy(self): + try: + return self._destroy() + except Exception: + log.exception("Failed to destroy %s", self.name) + return False + + def _destroy(self): + pass + + @property + def remote(self): + if not hasattr(self, '_remote'): + self._remote = teuthology.orchestra.remote.Remote( + "%s@%s" % (self.user, self.name), + ) + return self._remote + + def __repr__(self): + template = "%s(provider='%s', name='%s', os_type='%s', " \ + "os_version='%s')" + return template % ( + self.__class__.__name__, + self.provider.name, + self.name, + self.os_type, + self.os_version, + ) diff --git a/teuthology/provision/cloud/openstack.py b/teuthology/provision/cloud/openstack.py new file mode 100644 index 0000000000..d8b838b13e --- /dev/null +++ b/teuthology/provision/cloud/openstack.py @@ -0,0 +1,452 @@ +import logging +import re +import requests +import socket +import time +import yaml + +from teuthology.util.compat import urlencode + +from copy import deepcopy +from libcloud.common.exceptions import RateLimitReachedError, BaseHTTPError + +from paramiko import AuthenticationException +from paramiko.ssh_exception import NoValidConnectionsError + +from teuthology.config import config +from teuthology.contextutil import safe_while + +from teuthology.provision.cloud import base +from teuthology.provision.cloud import util +from teuthology.provision.cloud.base import Provider + + +log = logging.getLogger(__name__) + + +RETRY_EXCEPTIONS = (RateLimitReachedError, BaseHTTPError) + + +def retry(function, *args, **kwargs): + """ + Call a function (returning its results), retrying if any of the exceptions + in RETRY_EXCEPTIONS are raised + """ + with safe_while(sleep=1, tries=24, increment=1) as proceed: + tries = 0 + while proceed(): + tries += 1 + try: + result = function(*args, **kwargs) + if tries > 1: + log.debug( + "'%s' succeeded after %s tries", + function.__name__, + tries, + ) + return result + except RETRY_EXCEPTIONS: + pass + + +class OpenStackProvider(Provider): + _driver_posargs = ['username', 'password'] + + def _get_driver(self): + self._auth_token = util.AuthToken(name='teuthology_%s' % self.name) + with self._auth_token as token: + driver = super(OpenStackProvider, 
self)._get_driver() + # We must apparently call get_service_catalog() so that + # get_endpoint() works. + driver.connection.get_service_catalog() + if not token.value: + token.write( + driver.connection.auth_token, + driver.connection.auth_token_expires, + driver.connection.get_endpoint(), + ) + return driver + driver = property(fget=_get_driver) + + def _get_driver_args(self): + driver_args = super(OpenStackProvider, self)._get_driver_args() + if self._auth_token.value: + driver_args['ex_force_auth_token'] = self._auth_token.value + driver_args['ex_force_base_url'] = self._auth_token.endpoint + return driver_args + + @property + def ssh_interface(self): + if not hasattr(self, '_ssh_interface'): + self._ssh_interface = self.conf.get('ssh_interface', 'public_ips') + return self._ssh_interface + + @property + def images(self): + if not hasattr(self, '_images'): + exclude_image = self.conf.get('exclude_image', []) + if exclude_image and not isinstance(exclude_image, list): + exclude_image = [exclude_image] + exclude_re = [re.compile(x) for x in exclude_image] + images = retry(self.driver.list_images) + self._images = [_ for _ in images + if not any(x.match(_.name) for x in exclude_re)] + return self._images + + @property + def sizes(self): + if not hasattr(self, '_sizes'): + allow_sizes = self.conf.get('allow_sizes', '.*') + if not isinstance(allow_sizes, list): + allow_sizes = [allow_sizes] + allow_re = [re.compile(x) for x in allow_sizes] + # By default, exclude instance types meant for Windows + exclude_sizes = self.conf.get('exclude_sizes', 'win-.*') + if not isinstance(exclude_sizes, list): + exclude_sizes = [exclude_sizes] + exclude_re = [re.compile(x) for x in exclude_sizes] + sizes = retry(self.driver.list_sizes) + self._sizes = list(filter( + lambda s: + any(x.match(s.name) for x in allow_re) + and not + all(x.match(s.name) for x in exclude_re), + sizes + )) + return self._sizes + + @property + def networks(self): + if not hasattr(self, '_networks'): + allow_networks = self.conf.get('allow_networks', '.*') + if not isinstance(allow_networks, list): + allow_networks=[allow_networks] + networks_re = [re.compile(x) for x in allow_networks] + try: + networks = retry(self.driver.ex_list_networks) + if networks: + self._networks = filter( + lambda s: any(x.match(s.name) for x in networks_re), + networks + ) + else: + self._networks = list() + except AttributeError: + log.warning("Unable to list networks for %s", self.driver) + self._networks = list() + return self._networks + + @property + def default_userdata(self): + if not hasattr(self, '_default_userdata'): + self._default_userdata = self.conf.get('userdata', dict()) + return self._default_userdata + + @property + def security_groups(self): + if not hasattr(self, '_security_groups'): + try: + self._security_groups = retry( + self.driver.ex_list_security_groups + ) + except AttributeError: + log.warning("Unable to list security groups for %s", self.driver) + self._security_groups = list() + return self._security_groups + + +class OpenStackProvisioner(base.Provisioner): + _sentinel_path = '/.teuth_provisioned' + + defaults = dict( + openstack=dict( + machine=dict( + disk=20, + ram=8000, + cpus=1, + ), + volumes=dict( + count=0, + size=0, + ), + ) + ) + + def __init__( + self, + provider, name, os_type=None, os_version=None, + conf=None, + user='ubuntu', + ): + super(OpenStackProvisioner, self).__init__( + provider, name, os_type, os_version, conf=conf, user=user, + ) + self._read_conf(conf) + + def _read_conf(self, conf=None): + """ + 
Looks through the following in order: + + the 'conf' arg + conf[DRIVER_NAME] + teuthology.config.config.DRIVER_NAME + self.defaults[DRIVER_NAME] + + It will use the highest value for each of the following: disk, RAM, + cpu, volume size and count + + The resulting configuration becomes the new instance configuration + and is stored as self.conf + + :param conf: The instance configuration + + :return: None + """ + driver_name = self.provider.driver_name.lower() + full_conf = conf or dict() + driver_conf = full_conf.get(driver_name, dict()) + legacy_conf = getattr(config, driver_name) or dict() + defaults = self.defaults.get(driver_name, dict()) + confs = list() + for obj in (full_conf, driver_conf, legacy_conf, defaults): + obj = deepcopy(obj) + if isinstance(obj, list): + confs.extend(obj) + else: + confs.append(obj) + self.conf = util.combine_dicts(confs, lambda x, y: x > y) + + def _create(self): + userdata = self.userdata + log.debug("Creating node: %s", self) + log.debug("Selected size: %s", self.size) + log.debug("Selected image: %s", self.image) + log.debug("Using userdata: %s", userdata) + create_args = dict( + name=self.name, + size=self.size, + image=self.image, + ex_userdata=userdata, + ) + networks = self.provider.networks + if networks: + create_args['networks'] = networks + security_groups = self.security_groups + if security_groups: + create_args['ex_security_groups'] = security_groups + self._node = retry( + self.provider.driver.create_node, + **create_args + ) + log.debug("Created node: %s", self.node) + results = retry( + self.provider.driver.wait_until_running, + nodes=[self.node], + ssh_interface=self.provider.ssh_interface, + ) + self._node, self.ips = results[0] + log.debug("Node started: %s", self.node) + if not self._create_volumes(): + self._destroy_volumes() + return False + self._update_dns() + # Give cloud-init a few seconds to bring up the network, start sshd, + # and install the public key + time.sleep(20) + self._wait_for_ready() + return self.node + + def _create_volumes(self): + vol_count = self.conf['volumes']['count'] + vol_size = self.conf['volumes']['size'] + name_templ = "%s_%0{0}d".format(len(str(vol_count - 1))) + vol_names = [name_templ % (self.name, i) + for i in range(vol_count)] + try: + for name in vol_names: + volume = retry( + self.provider.driver.create_volume, + vol_size, + name, + ) + log.info("Created volume %s", volume) + retry( + self.provider.driver.attach_volume, + self.node, + volume, + device=None, + ) + except Exception: + log.exception("Failed to create or attach volume!") + return False + return True + + def _destroy_volumes(self): + all_volumes = retry(self.provider.driver.list_volumes) + our_volumes = [vol for vol in all_volumes + if vol.name.startswith("%s_" % self.name)] + for vol in our_volumes: + try: + retry(self.provider.driver.detach_volume, vol) + except Exception: + log.exception("Could not detach volume %s", vol) + try: + retry(self.provider.driver.destroy_volume, vol) + except Exception: + log.exception("Could not destroy volume %s", vol) + + def _update_dns(self): + query = urlencode(dict( + name=self.name, + ip=self.ips[0], + )) + nsupdate_url = "%s?%s" % ( + config.nsupdate_url, + query, + ) + resp = requests.get(nsupdate_url) + resp.raise_for_status() + + def _wait_for_ready(self): + with safe_while(sleep=6, tries=20) as proceed: + while proceed(): + try: + self.remote.connect() + break + except ( + socket.error, + NoValidConnectionsError, + AuthenticationException, + ): + pass + cmd = "while [ ! 
-e '%s' ]; do sleep 5; done" % self._sentinel_path + self.remote.run(args=cmd, timeout=600) + log.info("Node is ready: %s", self.node) + + @property + def image(self): + os_specs = [ + '{os_type} {os_version}', + '{os_type}-{os_version}', + ] + for spec in os_specs: + matches = [image for image in self.provider.images + if spec.format( + os_type=self.os_type, + os_version=self.os_version, + ) in image.name.lower()] + if matches: + break + if not matches: + raise RuntimeError( + "Could not find an image for %s %s" % + (self.os_type, self.os_version)) + return matches[0] + + @property + def size(self): + ram = self.conf['machine']['ram'] + disk = self.conf['machine']['disk'] + cpu = self.conf['machine']['cpus'] + + def good_size(size): + if (size.ram < ram or size.disk < disk or size.vcpus < cpu): + return False + return True + + all_sizes = self.provider.sizes + good_sizes = filter(good_size, all_sizes) + smallest_match = sorted( + good_sizes, + key=lambda s: (s.ram, s.disk, s.vcpus) + )[0] + return smallest_match + + @property + def security_groups(self): + group_names = self.provider.conf.get('security_groups') + if group_names is None: + return + result = list() + groups = self.provider.security_groups + for name in group_names: + matches = [group for group in groups if group.name == name] + if not matches: + msg = "No security groups found with name '%s'" + elif len(matches) > 1: + msg = "More than one security group found with name '%s'" + elif len(matches) == 1: + result.append(matches[0]) + continue + raise RuntimeError(msg % name) + return result + + @property + def userdata(self): + spec="{t}-{v}".format(t=self.os_type, + v=self.os_version) + base_config = dict( + packages=[ + 'git', + 'wget', + 'python', + 'ntp', + ], + ) + runcmd=[ + # Remove the user's password so that console logins are + # possible + ['passwd', '-d', self.user], + ['touch', self._sentinel_path] + ] + if spec in self.provider.default_userdata: + base_config = deepcopy( + self.provider.default_userdata.get(spec, dict())) + base_config.update(user=self.user) + if 'manage_etc_hosts' not in base_config: + base_config.update( + manage_etc_hosts=True, + hostname=self.hostname, + ) + base_config['runcmd'] = base_config.get('runcmd', list()) + base_config['runcmd'].extend(runcmd) + ssh_pubkey = util.get_user_ssh_pubkey() + if ssh_pubkey: + authorized_keys = base_config.get('ssh_authorized_keys', list()) + authorized_keys.append(ssh_pubkey) + base_config['ssh_authorized_keys'] = authorized_keys + user_str = "#cloud-config\n" + yaml.safe_dump(base_config) + return user_str + + @property + def node(self): + if hasattr(self, '_node'): + return self._node + matches = self._find_nodes() + msg = "Unknown error locating %s" + if not matches: + msg = "No nodes found with name '%s'" % self.name + log.warning(msg) + return + elif len(matches) > 1: + msg = "More than one node found with name '%s'" + elif len(matches) == 1: + self._node = matches[0] + return self._node + raise RuntimeError(msg % self.name) + + def _find_nodes(self): + nodes = retry(self.provider.driver.list_nodes) + matches = [node for node in nodes if node.name == self.name] + return matches + + def _destroy(self): + self._destroy_volumes() + nodes = self._find_nodes() + if not nodes: + log.warning("Didn't find any nodes named '%s' to destroy!", self.name) + return True + if len(nodes) > 1: + log.warning("Found multiple nodes named '%s' to destroy!", self.name) + log.info("Destroying nodes: %s", nodes) + return all([node.destroy() for node in nodes]) diff --git 
a/teuthology/provision/cloud/test/test_base.py b/teuthology/provision/cloud/test/test_base.py new file mode 100644 index 0000000000..b1ef29904b --- /dev/null +++ b/teuthology/provision/cloud/test/test_base.py @@ -0,0 +1,90 @@ +from libcloud.compute.providers import get_driver +from mock import patch + +from teuthology.config import config +from teuthology.provision import cloud + +from test_cloud_init import dummy_config, dummy_drivers + + +class TestBase(object): + def setup(self): + config.load() + config.libcloud = dummy_config + cloud.supported_drivers['dummy'] = dummy_drivers + + def teardown(self): + del cloud.supported_drivers['dummy'] + + +class TestProvider(TestBase): + def test_init(self): + obj = cloud.get_provider('my_provider') + assert obj.name == 'my_provider' + assert obj.driver_name == 'dummy' + assert obj.conf == dummy_config['providers']['my_provider'] + + def test_driver(self): + obj = cloud.get_provider('my_provider') + assert isinstance(obj.driver, get_driver('dummy')) + + +class TestProvisioner(TestBase): + klass = cloud.base.Provisioner + + def get_obj( + self, name='node_name', os_type='ubuntu', os_version='ubuntu'): + return cloud.get_provisioner( + 'my_provider', + 'node_name', + 'ubuntu', + '16.04', + ) + + def test_init_provider_string(self): + obj = self.klass('my_provider', 'ubuntu', '16.04') + assert obj.provider.name == 'my_provider' + + def test_create(self): + obj = self.get_obj() + with patch.object( + self.klass, + '_create', + ) as m_create: + for val in [True, False]: + m_create.return_value = val + res = obj.create() + assert res is val + m_create.assert_called_once_with() + m_create.reset_mock() + m_create.side_effect = RuntimeError + res = obj.create() + assert res is False + assert obj.create() is None + + def test_destroy(self): + obj = self.get_obj() + with patch.object( + self.klass, + '_destroy', + ) as m_destroy: + for val in [True, False]: + m_destroy.return_value = val + res = obj.destroy() + assert res is val + m_destroy.assert_called_once_with() + m_destroy.reset_mock() + m_destroy.side_effect = RuntimeError + res = obj.destroy() + assert res is False + assert obj.destroy() is None + + def test_remote(self): + obj = self.get_obj() + assert obj.remote.shortname == 'node_name' + + def test_repr(self): + obj = self.get_obj() + assert repr(obj) == \ + "Provisioner(provider='my_provider', name='node_name', os_type='ubuntu', os_version='16.04')" # noqa + diff --git a/teuthology/provision/cloud/test/test_cloud_init.py b/teuthology/provision/cloud/test/test_cloud_init.py new file mode 100644 index 0000000000..fdee723e6c --- /dev/null +++ b/teuthology/provision/cloud/test/test_cloud_init.py @@ -0,0 +1,60 @@ +from teuthology.config import config +from teuthology.provision import cloud + +dummy_config = dict( + providers=dict( + my_provider=dict( + driver='dummy', + driver_args=dict( + creds=0, + ), + conf_1='1', + conf_2='2', + ) + ) +) + + +class DummyProvider(cloud.base.Provider): + # For libcloud's dummy driver + _driver_posargs = ['creds'] + +dummy_drivers = dict( + provider=DummyProvider, + provisioner=cloud.base.Provisioner, +) + + +class TestInit(object): + def setup(self): + config.load() + config.libcloud = dummy_config + cloud.supported_drivers['dummy'] = dummy_drivers + + def teardown(self): + del cloud.supported_drivers['dummy'] + + def test_get_types(self): + assert list(cloud.get_types()) == ['my_provider'] + + def test_get_provider_conf(self): + expected = dummy_config['providers']['my_provider'] + assert 
cloud.get_provider_conf('my_provider') == expected + + def test_get_provider(self): + obj = cloud.get_provider('my_provider') + assert obj.name == 'my_provider' + assert obj.driver_name == 'dummy' + + def test_get_provisioner(self): + obj = cloud.get_provisioner( + 'my_provider', + 'node_name', + 'ubuntu', + '16.04', + dict(foo='bar'), + ) + assert obj.provider.name == 'my_provider' + assert obj.name == 'node_name' + assert obj.os_type == 'ubuntu' + assert obj.os_version == '16.04' diff --git a/teuthology/provision/cloud/test/test_cloud_util.py b/teuthology/provision/cloud/test/test_cloud_util.py new file mode 100644 index 0000000000..2f6035a7ce --- /dev/null +++ b/teuthology/provision/cloud/test/test_cloud_util.py @@ -0,0 +1,172 @@ +import datetime +import dateutil +import json + +from mock import patch, mock_open +from pytest import mark + +from teuthology.provision.cloud import util + + +@mark.parametrize( + 'path, exists', + [ + ('/fake/path', True), + ('/fake/path', False), + ] +) +def test_get_user_ssh_pubkey(path, exists): + with patch('os.path.exists') as m_exists: + m_exists.return_value = exists + with patch('teuthology.provision.cloud.util.open', mock_open(), create=True) as m_open: + util.get_user_ssh_pubkey(path) + if exists: + m_open.assert_called_once_with(path) + + +@mark.parametrize( + 'input_, func, expected', + [ + [ + [ + dict(sub0=dict(key0=0, key1=0)), + dict(sub0=dict(key1=1, key2=2)), + ], + lambda x, y: x > y, + dict(sub0=dict(key0=0, key1=1, key2=2)) + ], + [ + [ + dict(), + dict(sub0=dict(key1=1, key2=2)), + ], + lambda x, y: x > y, + dict(sub0=dict(key1=1, key2=2)) + ], + [ + [ + dict(sub0=dict(key1=1, key2=2)), + dict(), + ], + lambda x, y: x > y, + dict(sub0=dict(key1=1, key2=2)) + ], + [ + [ + dict(sub0=dict(key0=0, key1=0, key2=0)), + dict(sub0=dict(key0=1, key2=3), sub1=dict(key0=0)), + dict(sub0=dict(key0=3, key1=2, key2=1)), + dict(sub0=dict(key1=3), + sub1=dict(key0=3, key1=0)), + ], + lambda x, y: x > y, + dict(sub0=dict(key0=3, key1=3, key2=3), + sub1=dict(key0=3, key1=0)) + ], + ] +) +def test_combine_dicts(input_, func, expected): + assert util.combine_dicts(input_, func) == expected + + +def get_datetime(offset_hours=0): + delta = datetime.timedelta(hours=offset_hours) + return datetime.datetime.now(dateutil.tz.tzutc()) + delta + + +def get_datetime_string(offset_hours=0): + obj = get_datetime(offset_hours) + return obj.strftime(util.AuthToken.time_format) + + +class TestAuthToken(object): + klass = util.AuthToken + + def setup(self): + default_expires = get_datetime_string(0) + self.test_data = dict( + value='token_value', + endpoint='endpoint', + expires=default_expires, + ) + self.patchers = dict() + self.patchers['m_open'] = patch( + 'teuthology.provision.cloud.util.open' + ) + self.patchers['m_exists'] = patch( + 'os.path.exists' + ) + self.patchers['m_file_lock'] = patch( + 'teuthology.provision.cloud.util.FileLock' + ) + self.mocks = dict() + for name, patcher in self.patchers.items(): + self.mocks[name] = patcher.start() + + def teardown(self): + for patcher in self.patchers.values(): + patcher.stop() + + def get_obj(self, name='name', directory='/fake/directory'): + return self.klass( + name=name, + directory=directory, + ) + + def test_no_token(self): + obj = self.get_obj() + self.mocks['m_exists'].return_value = False + with obj: + assert obj.value is None + assert obj.expired is True + + @mark.parametrize( + 'test_data, expired', + [ + [ + dict( + value='token_value', + endpoint='endpoint', + expires=get_datetime_string(-1), + ), + True 
+ ], + [ + dict( + value='token_value', + endpoint='endpoint', + expires=get_datetime_string(1), + ), + False + ], + ] + ) + def test_token_read(self, test_data, expired): + obj = self.get_obj() + self.mocks['m_exists'].return_value = True + self.mocks['m_open'].return_value.__enter__.return_value.read.return_value = \ + json.dumps(test_data) + with obj: + if expired: + assert obj.value is None + assert obj.expired is True + else: + assert obj.value == test_data['value'] + + def test_token_write(self): + obj = self.get_obj() + datetime_obj = get_datetime(0) + datetime_string = get_datetime_string(0) + self.mocks['m_exists'].return_value = False + with obj: + obj.write('value', datetime_obj, 'endpoint') + m_open = self.mocks['m_open'] + write_calls = m_open.return_value.__enter__.return_value.write\ + .call_args_list + assert len(write_calls) == 1 + expected = json.dumps(dict( + value='value', + expires=datetime_string, + endpoint='endpoint', + )) + assert write_calls[0][0][0] == expected diff --git a/teuthology/provision/cloud/test/test_openstack.py b/teuthology/provision/cloud/test/test_openstack.py new file mode 100644 index 0000000000..108532ed56 --- /dev/null +++ b/teuthology/provision/cloud/test/test_openstack.py @@ -0,0 +1,781 @@ +import socket +import yaml +import os + +from teuthology.util.compat import parse_qs + +from copy import deepcopy +from libcloud.compute.providers import get_driver +from mock import patch, Mock, DEFAULT +from pytest import raises, mark + +from teuthology.config import config +from teuthology.exceptions import MaxWhileTries +from teuthology.provision import cloud + +test_config = dict( + providers=dict( + my_provider=dict( + driver='openstack', + driver_args=dict( + username='user', + password='password', + ex_force_auth_url='http://127.0.0.1:9999/v2.0/tokens', + ), + ), + image_exclude_provider=dict( + driver='openstack', + exclude_image=['.*-exclude1', '.*-exclude2'], + driver_args=dict( + username='user', + password='password', + ex_force_auth_url='http://127.0.0.1:9999/v2.0/tokens', + ), + ) + ) +) + + +@patch('time.sleep') +def test_retry(m_sleep): + orig_exceptions = cloud.openstack.RETRY_EXCEPTIONS + new_exceptions = orig_exceptions + (RuntimeError, ) + + class test_cls(object): + def __init__(self, min_val): + self.min_val = min_val + self.cur_val = 0 + + def func(self): + self.cur_val += 1 + if self.cur_val < self.min_val: + raise RuntimeError + return self.cur_val + + with patch.object( + cloud.openstack, + 'RETRY_EXCEPTIONS', + new=new_exceptions, + ): + test_obj = test_cls(min_val=5) + assert cloud.openstack.retry(test_obj.func) == 5 + test_obj = test_cls(min_val=1000) + with raises(MaxWhileTries): + cloud.openstack.retry(test_obj.func) + + +def get_fake_obj(mock_args=None, attributes=None): + if mock_args is None: + mock_args = dict() + if attributes is None: + attributes = dict() + obj = Mock(**mock_args) + for name, value in attributes.items(): + setattr(obj, name, value) + return obj + + +class TestOpenStackBase(object): + def setup(self): + config.load(dict(libcloud=deepcopy(test_config))) + self.start_patchers() + + def start_patchers(self): + self.patchers = dict() + self.patchers['m_list_images'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.list_images' + ) + self.patchers['m_list_sizes'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.list_sizes' + ) + self.patchers['m_ex_list_networks'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStack_1_1_NodeDriver.ex_list_networks' + ) + 
self.patchers['m_ex_list_security_groups'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStack_1_1_NodeDriver.ex_list_security_groups' + ) + self.patchers['m_get_user_ssh_pubkey'] = patch( + 'teuthology.provision.cloud.util.get_user_ssh_pubkey' + ) + self.patchers['m_list_nodes'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.list_nodes' + ) + self.patchers['m_create_node'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStack_1_1_NodeDriver.create_node' + ) + self.patchers['m_wait_until_running'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.wait_until_running' + ) + self.patchers['m_create_volume'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.create_volume' + ) + self.patchers['m_attach_volume'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.attach_volume' + ) + self.patchers['m_detach_volume'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.detach_volume' + ) + self.patchers['m_list_volumes'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.list_volumes' + ) + self.patchers['m_destroy_volume'] = patch( + 'libcloud.compute.drivers.openstack' + '.OpenStackNodeDriver.destroy_volume' + ) + self.patchers['m_get_service_catalog'] = patch( + 'libcloud.common.openstack' + '.OpenStackBaseConnection.get_service_catalog' + ) + self.patchers['m_auth_token'] = patch( + 'teuthology.provision.cloud.util.AuthToken' + ) + self.patchers['m_get_endpoint'] = patch( + 'libcloud.common.openstack' + '.OpenStackBaseConnection.get_endpoint', + ) + self.patchers['m_connect'] = patch( + 'libcloud.common.base' + '.Connection.connect', + ) + self.patchers['m_sleep'] = patch( + 'time.sleep' + ) + self.patchers['m_get'] = patch( + 'requests.get' + ) + self.mocks = dict() + for name, patcher in self.patchers.items(): + self.mocks[name] = patcher.start() + self.mocks['m_get_endpoint'].return_value = 'endpoint' + + def teardown(self): + for patcher in self.patchers.values(): + patcher.stop() + + +class TestOpenStackProvider(TestOpenStackBase): + klass = cloud.openstack.OpenStackProvider + + def test_init(self): + obj = cloud.get_provider('my_provider') + assert obj.name == 'my_provider' + assert obj.driver_name == 'openstack' + assert obj.conf == test_config['providers']['my_provider'] + + def test_driver(self): + token = self.mocks['m_auth_token'].return_value + self.mocks['m_auth_token'].return_value.__enter__.return_value = token + token.value = None + obj = cloud.get_provider('my_provider') + assert isinstance(obj.driver, get_driver('openstack')) + assert obj._auth_token.value is None + + def test_images(self): + obj = cloud.get_provider('my_provider') + self.mocks['m_list_images'].return_value = [ + get_fake_obj(attributes=dict(name=_)) + for _ in ['image0', 'image1']] + assert not hasattr(obj, '_images') + assert [_.name for _ in obj.images] == ['image0', 'image1'] + assert hasattr(obj, '_images') + + def test_exclude_image(self): + obj = cloud.get_provider('image_exclude_provider') + self.mocks['m_list_images'].return_value = [ + get_fake_obj(attributes=dict(name=_)) + for _ in ['image0', 'image1', + 'image2-exclude1', 'image3-exclude2']] + assert not hasattr(obj, '_images') + assert [_.name for _ in obj.images] == ['image0', 'image1'] + assert hasattr(obj, '_images') + + def test_sizes(self): + obj = cloud.get_provider('my_provider') + fake_sizes = [get_fake_obj(attributes=dict(name='size%s' % i)) for + i in range(2)] + 
self.mocks['m_list_sizes'].return_value = fake_sizes + assert not hasattr(obj, '_sizes') + assert [s.name for s in obj.sizes] == ['size0', 'size1'] + assert hasattr(obj, '_sizes') + + def test_networks(self): + obj = cloud.get_provider('my_provider') + nets = [get_fake_obj(attributes=dict(name=i)) for i in ['net0', 'net1']] + self.mocks['m_ex_list_networks'].return_value = nets + assert not hasattr(obj, '_networks') + assert [i.name for i in obj.networks] == [i.name for i in nets] + assert hasattr(obj, '_networks') + self.mocks['m_ex_list_networks'].side_effect = AttributeError + obj = cloud.get_provider('my_provider') + assert not hasattr(obj, '_networks') + assert obj.networks == list() + assert hasattr(obj, '_networks') + + def test_security_groups(self): + obj = cloud.get_provider('my_provider') + self.mocks['m_ex_list_security_groups'].return_value = ['sg0', 'sg1'] + assert not hasattr(obj, '_security_groups') + assert obj.security_groups == ['sg0', 'sg1'] + assert hasattr(obj, '_security_groups') + self.mocks['m_ex_list_security_groups'].side_effect = AttributeError + obj = cloud.get_provider('my_provider') + assert not hasattr(obj, '_security_groups') + assert obj.security_groups == list() + assert hasattr(obj, '_security_groups') + + +class TestOpenStackCustomProvisioner(TestOpenStackBase): + klass = cloud.openstack.OpenStackProvisioner + def get_obj( + self, name='node_name', os_type='ubuntu', + os_version='16.04', conf=None, test_conf=None): + + if test_conf: + yaml_file = os.path.dirname(__file__) + '/' + test_conf + print("Reading conf: %s" % yaml_file) + with open(yaml_file) as f: + teuth_conf=yaml.safe_load(f) + print(teuth_conf) + config.libcloud = deepcopy(teuth_conf['libcloud'] or test_config) + else: + config.libcloud = deepcopy(test_config) + return cloud.get_provisioner( + node_type='my_provider', + name=name, + os_type=os_type, + os_version=os_version, + conf=conf, + ) + + @mark.parametrize( + "conf", + [ + dict( + path='test_openstack_userdata_conf.yaml', + runcmd_head=['uptime', 'date'], + ssh_authorized_keys=['user_public_key1', 'user_public_key2'], + user_ssh_pubkey='my_ssh_key', + os_version='16.04', + os_type='ubuntu', + ), + dict( + path='test_openstack_userdata_conf.yaml', + runcmd_head=['uptime', 'date'], + ssh_authorized_keys=['user_public_key1', 'user_public_key2'], + user_ssh_pubkey=None, + os_version='16.04', + os_type='ubuntu', + ), + dict( + os_version='16.04', + os_type='ubuntu', + path=None, + user_ssh_pubkey=None, + ), + ] + ) + def test_userdata_conf(self, conf): + self.mocks['m_get_user_ssh_pubkey'].return_value = conf['user_ssh_pubkey'] + obj = self.get_obj(os_version=conf['os_version'], + os_type=conf['os_type'], + test_conf=conf['path']) + userdata = yaml.safe_load(obj.userdata) + print(">>>> ", obj.conf) + print(">>>> ", obj.provider.conf) + print(">>>> ", obj.provider) + print(obj.userdata) + if conf and 'path' in conf and conf['path']: + assert userdata['runcmd'][0:len(conf['runcmd_head'])] == conf['runcmd_head'] + assert userdata['bootcmd'] == [ + 'SuSEfirewall2 stop || true', + 'service firewalld stop || true', + ] + assert 'packages' not in userdata + else: + assert 'bootcmd' not in userdata + assert userdata['packages'] == ['git', 'wget', 'python', 'ntp'] + assert userdata['user'] == obj.user + assert userdata['hostname'] == obj.hostname + if 'user_ssh_pubkey' in conf and conf['user_ssh_pubkey']: + assert userdata['ssh_authorized_keys'][-1] == conf['user_ssh_pubkey'] + if 'ssh_authorized_keys' in conf: + keys = conf['ssh_authorized_keys'] 
+ assert userdata['ssh_authorized_keys'][0:len(keys)] == keys + else: + if 'ssh_authorized_keys' in conf: + keys = conf['ssh_authorized_keys'] + assert userdata['ssh_authorized_keys'][0:len(keys)] == keys + else: + assert 'ssh_authorized_keys' not in userdata + + @mark.parametrize( + "conf", + [ + dict( + path='test_openstack_userdata_conf.yaml', + runcmd_head=['uptime', 'date'], + ), + dict( + path=None, + ), + ] + ) + def test_userdata_conf_runcmd(self, conf): + self.mocks['m_get_user_ssh_pubkey'].return_value = None + obj = self.get_obj(test_conf=conf['path']) + userdata = yaml.safe_load(obj.userdata) + assert userdata['runcmd'][-2:] == [['passwd', '-d', 'ubuntu'], ['touch', '/.teuth_provisioned']] + + @mark.parametrize( + "conf", + [ + dict( + path='test_openstack_userdata_conf.yaml', + packages=None, + ), + dict( + path=None, + packages=['git', 'wget', 'python', 'ntp'] + ), + ] + ) + def test_userdata_conf_packages(self, conf): + self.mocks['m_get_user_ssh_pubkey'].return_value = None + obj = self.get_obj(test_conf=conf['path']) + userdata = yaml.safe_load(obj.userdata) + assert userdata.get('packages', None) == conf['packages'] + +class TestOpenStackProvisioner(TestOpenStackBase): + klass = cloud.openstack.OpenStackProvisioner + + def get_obj( + self, name='node_name', os_type='ubuntu', + os_version='16.04', conf=None): + return cloud.get_provisioner( + node_type='my_provider', + name=name, + os_type=os_type, + os_version=os_version, + conf=conf, + ) + + def test_init(self): + with patch.object( + self.klass, + '_read_conf', + ) as m_read_conf: + self.get_obj() + assert len(m_read_conf.call_args_list) == 1 + + @mark.parametrize( + 'input_conf', + [ + dict(machine=dict( + disk=42, + ram=9001, + cpus=3, + )), + dict(volumes=dict( + count=3, + size=100, + )), + dict(), + dict( + machine=dict( + disk=1, + ram=2, + cpus=3, + ), + volumes=dict( + count=4, + size=5, + ) + ), + dict( + machine=dict( + disk=100, + ), + ), + ] + ) + def test_read_conf(self, input_conf): + obj = self.get_obj(conf=input_conf) + for topic in ['machine', 'volumes']: + combined = cloud.util.combine_dicts( + [input_conf, config.openstack], + lambda x, y: x > y, + ) + assert obj.conf[topic] == combined[topic] + + @mark.parametrize( + 'input_conf, expected_machine, expected_vols', + [ + [ + dict(openstack=[ + dict(machine=dict(disk=64, ram=10000, cpus=3)), + dict(volumes=dict(count=1, size=1)), + ]), + dict(disk=64, ram=10000, cpus=3), + dict(count=1, size=1), + ], + [ + dict(openstack=[ + dict(machine=dict(cpus=3)), + dict(machine=dict(disk=1, ram=9000)), + dict(machine=dict(disk=50, ram=2, cpus=1)), + dict(machine=dict()), + dict(volumes=dict()), + dict(volumes=dict(count=0, size=0)), + dict(volumes=dict(count=1, size=0)), + dict(volumes=dict(size=1)), + ]), + dict(disk=50, ram=9000, cpus=3), + dict(count=1, size=1), + ], + [ + dict(openstack=[ + dict(volumes=dict(count=3, size=30)), + dict(volumes=dict(size=50)), + ]), + None, + dict(count=3, size=50), + ], + [ + dict(openstack=[ + dict(machine=dict(disk=100)), + dict(volumes=dict(count=3, size=30)), + ]), + dict(disk=100, ram=8000, cpus=1), + dict(count=3, size=30), + ], + ] + ) + def test_read_conf_legacy( + self, input_conf, expected_machine, expected_vols): + obj = self.get_obj(conf=input_conf) + if expected_machine is not None: + assert obj.conf['machine'] == expected_machine + else: + assert obj.conf['machine'] == config.openstack['machine'] + if expected_vols is not None: + assert obj.conf['volumes'] == expected_vols + + @mark.parametrize( + "os_type, 
os_version, should_find", + [ + ('centos', '7', True), + ('BeOS', '42', False), + ] + ) + def test_image(self, os_type, os_version, should_find): + image_attrs = [ + dict(name='ubuntu-14.04'), + dict(name='ubuntu-16.04'), + dict(name='centos-7.0'), + ] + fake_images = list() + for item in image_attrs: + fake_images.append( + get_fake_obj(attributes=item) + ) + obj = self.get_obj(os_type=os_type, os_version=os_version) + self.mocks['m_list_images'].return_value = fake_images + if should_find: + assert obj.os_version in obj.image.name + assert obj.image in fake_images + else: + with raises(RuntimeError): + obj.image + + @mark.parametrize( + "input_attrs, func_or_exc", + [ + (dict(ram=2**16), + lambda s: s.ram == 2**16), + (dict(disk=9999), + lambda s: s.disk == 9999), + (dict(cpus=99), + lambda s: s.vcpus == 99), + (dict(ram=2**16, disk=9999, cpus=99), + IndexError), + ] + ) + def test_size(self, input_attrs, func_or_exc): + size_attrs = [ + dict(ram=8000, disk=9999, vcpus=99, name='s0'), + dict(ram=2**16, disk=20, vcpus=99, name='s1'), + dict(ram=2**16, disk=9999, vcpus=1, name='s2'), + ] + fake_sizes = list() + for item in size_attrs: + fake_sizes.append( + get_fake_obj(attributes=item) + ) + base_spec = dict(machine=dict( + ram=1, + disk=1, + cpus=1, + )) + spec = deepcopy(base_spec) + spec['machine'].update(input_attrs) + obj = self.get_obj(conf=spec) + self.mocks['m_list_sizes'].return_value = fake_sizes + if isinstance(func_or_exc, type): + with raises(func_or_exc): + obj.size + else: + assert obj.size in fake_sizes + assert func_or_exc(obj.size) is True + + @mark.parametrize( + "wanted_groups", + [ + ['group1'], + ['group0', 'group2'], + [], + ] + ) + def test_security_groups(self, wanted_groups): + group_names = ['group0', 'group1', 'group2'] + fake_groups = list() + for name in group_names: + fake_groups.append( + get_fake_obj(attributes=dict(name=name)) + ) + self.mocks['m_ex_list_security_groups'].return_value = fake_groups + obj = self.get_obj() + assert obj.security_groups is None + obj = self.get_obj() + obj.provider.conf['security_groups'] = wanted_groups + assert [g.name for g in obj.security_groups] == wanted_groups + + def test_security_groups_exc(self): + fake_groups = [ + get_fake_obj(attributes=dict(name='sg')) for i in range(2) + ] + obj = self.get_obj() + obj.provider.conf['security_groups'] = ['sg'] + with raises(RuntimeError): + obj.security_groups + self.mocks['m_ex_list_security_groups'].return_value = fake_groups + obj = self.get_obj() + obj.provider.conf['security_groups'] = ['sg'] + with raises(RuntimeError): + obj.security_groups + + @mark.parametrize( + "ssh_key", + [ + 'my_ssh_key', + None, + ] + ) + def test_userdata(self, ssh_key): + self.mocks['m_get_user_ssh_pubkey'].return_value = ssh_key + obj = self.get_obj() + userdata = yaml.safe_load(obj.userdata) + assert userdata['user'] == obj.user + assert userdata['hostname'] == obj.hostname + if ssh_key: + assert userdata['ssh_authorized_keys'] == [ssh_key] + else: + assert 'ssh_authorized_keys' not in userdata + + @mark.parametrize( + 'wanted_name, should_find, exception', + [ + ('node0', True, None), + ('node1', True, None), + ('node2', False, RuntimeError), + ('node3', False, None), + ] + ) + def test_node(self, wanted_name, should_find, exception): + node_names = ['node0', 'node1', 'node2', 'node2'] + fake_nodes = list() + for name in node_names: + fake_nodes.append( + get_fake_obj(attributes=dict(name=name)) + ) + self.mocks['m_list_nodes'].return_value = fake_nodes + obj = self.get_obj(name=wanted_name) 
+ if should_find: + assert obj.node.name == wanted_name + elif exception: + with raises(exception) as excinfo: + obj.node + assert excinfo.value.message + else: + assert obj.node is None + + @mark.parametrize( + 'networks, security_groups', + [ + ([], []), + (['net0'], []), + ([], ['sg0']), + (['net0'], ['sg0']), + ] + ) + def test_create(self, networks, security_groups): + node_name = 'node0' + fake_sizes = [ + get_fake_obj( + attributes=dict(ram=2**16, disk=9999, vcpus=99, name='s0')), + ] + fake_security_groups = [ + get_fake_obj(attributes=dict(name=name)) + for name in security_groups + ] + self.mocks['m_ex_list_networks'].return_value = networks + self.mocks['m_ex_list_security_groups'].return_value = \ + fake_security_groups + self.mocks['m_list_sizes'].return_value = fake_sizes + fake_images = [ + get_fake_obj(attributes=dict(name='ubuntu-16.04')), + ] + self.mocks['m_list_images'].return_value = fake_images + self.mocks['m_get_user_ssh_pubkey'].return_value = 'ssh_key' + fake_node = get_fake_obj(attributes=dict(name=node_name)) + fake_ips = ['555.123.4.0'] + self.mocks['m_create_node'].return_value = fake_node + self.mocks['m_wait_until_running'].return_value = \ + [(fake_node, fake_ips)] + obj = self.get_obj(name=node_name) + obj._networks = networks + obj.provider.conf['security_groups'] = security_groups + p_wait_for_ready = patch( + 'teuthology.provision.cloud.openstack.OpenStackProvisioner' + '._wait_for_ready' + ) + with p_wait_for_ready: + res = obj.create() + assert res is obj.node + # Test once again to ensure that if volume creation/attachment fails, + # we destroy any remaining volumes and consider the node creation to + # have failed as well. + del obj._node + with p_wait_for_ready: + obj.conf['volumes']['count'] = 1 + obj.provider.driver.create_volume.side_effect = Exception + with patch.object(obj, '_destroy_volumes'): + assert obj.create() is False + assert obj._destroy_volumes.called_once_with() + + def test_update_dns(self): + config.nsupdate_url = 'nsupdate_url' + obj = self.get_obj() + obj.name = 'x' + obj.ips = ['y'] + obj._update_dns() + call_args = self.mocks['m_get'].call_args_list + assert len(call_args) == 1 + url_base, query_string = call_args[0][0][0].split('?') + assert url_base == 'nsupdate_url' + parsed_query = parse_qs(query_string) + assert parsed_query == dict(name=['x'], ip=['y']) + + @mark.parametrize( + 'nodes', + [[], [Mock()], [Mock(), Mock()]] + ) + def test_destroy(self, nodes): + with patch( + 'teuthology.provision.cloud.openstack.' 
+ 'OpenStackProvisioner._find_nodes' + ) as m_find_nodes: + m_find_nodes.return_value = nodes + obj = self.get_obj() + result = obj.destroy() + if not all(nodes): + assert result is True + else: + for node in nodes: + assert node.destroy.called_once_with() + + _volume_matrix = ( + 'count, size, should_succeed', + [ + (1, 10, True), + (0, 10, True), + (10, 1, True), + (1, 10, False), + (10, 1, False), + ] + ) + + @mark.parametrize(*_volume_matrix) + def test_create_volumes(self, count, size, should_succeed): + obj_conf = dict(volumes=dict(count=count, size=size)) + obj = self.get_obj(conf=obj_conf) + node = get_fake_obj() + if not should_succeed: + obj.provider.driver.create_volume.side_effect = Exception + obj._node = node + result = obj._create_volumes() + assert result is should_succeed + if should_succeed: + create_calls = obj.provider.driver.create_volume.call_args_list + attach_calls = obj.provider.driver.attach_volume.call_args_list + assert len(create_calls) == count + assert len(attach_calls) == count + for i in range(count): + vol_size, vol_name = create_calls[i][0] + assert vol_size == size + assert vol_name == '%s_%s' % (obj.name, i) + assert attach_calls[i][0][0] is obj._node + assert attach_calls[i][1]['device'] is None + + @mark.parametrize(*_volume_matrix) + def test_destroy_volumes(self, count, size, should_succeed): + obj_conf = dict(volumes=dict(count=count, size=size)) + obj = self.get_obj(conf=obj_conf) + fake_volumes = list() + for i in range(count): + vol_name = '%s_%s' % (obj.name, i) + fake_volumes.append( + get_fake_obj(attributes=dict(name=vol_name)) + ) + obj.provider.driver.list_volumes.return_value = fake_volumes + obj._destroy_volumes() + detach_calls = obj.provider.driver.detach_volume.call_args_list + destroy_calls = obj.provider.driver.destroy_volume.call_args_list + assert len(detach_calls) == count + assert len(destroy_calls) == count + assert len(obj.provider.driver.detach_volume.call_args_list) == count + assert len(obj.provider.driver.destroy_volume.call_args_list) == count + obj.provider.driver.detach_volume.reset_mock() + obj.provider.driver.destroy_volume.reset_mock() + obj.provider.driver.detach_volume.side_effect = Exception + obj.provider.driver.destroy_volume.side_effect = Exception + obj._destroy_volumes() + assert len(obj.provider.driver.detach_volume.call_args_list) == count + assert len(obj.provider.driver.destroy_volume.call_args_list) == count + + def test_destroy_volumes_exc(self): + obj = self.get_obj() + obj.provider.driver.detach_volume.side_effect = Exception + + def test_wait_for_ready(self): + obj = self.get_obj() + obj._node = get_fake_obj(attributes=dict(name='node_name')) + with patch.multiple( + 'teuthology.orchestra.remote.Remote', + connect=DEFAULT, + run=DEFAULT, + ) as mocks: + obj._wait_for_ready() + mocks['connect'].side_effect = socket.error + with raises(MaxWhileTries): + obj._wait_for_ready() diff --git a/teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml b/teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml new file mode 100644 index 0000000000..f3e87a8461 --- /dev/null +++ b/teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml @@ -0,0 +1,24 @@ +libcloud: + providers: + my_provider: + allow_networks: + - sesci + userdata: + 'ubuntu-16.04': + bootcmd: + - 'SuSEfirewall2 stop || true' + - 'service firewalld stop || true' + runcmd: + - 'uptime' + - 'date' + - 'zypper in -y lsb-release make gcc gcc-c++ chrony || true' + - 'systemctl enable chronyd.service || true' + - 'systemctl 
start chronyd.service || true' + ssh_authorized_keys: + - user_public_key1 + - user_public_key2 + driver: openstack + driver_args: + username: user + password: password + ex_force_auth_url: 'http://127.0.0.1:9999/v2.0/tokens' diff --git a/teuthology/provision/cloud/util.py b/teuthology/provision/cloud/util.py new file mode 100644 index 0000000000..a6f137e941 --- /dev/null +++ b/teuthology/provision/cloud/util.py @@ -0,0 +1,116 @@ +import datetime +import dateutil.tz +import dateutil.parser +import json +import os + +from teuthology.util.flock import FileLock + +def get_user_ssh_pubkey(path='~/.ssh/id_rsa.pub'): + full_path = os.path.expanduser(path) + if not os.path.exists(full_path): + return + with open(full_path) as f: + return f.read().strip() + + +def combine_dicts(list_of_dicts, func): + """ + A useful function to merge a list of dicts. Most of the work is done by + selective_update(). + + :param list_of_dicts: A list of dicts to combine using selective_update() + :param func: A comparison function that will be passed to + selective_update() along with values from each input + dict + :returns: The new, merged, dict + """ + new_dict = dict() + for item in list_of_dicts: + selective_update(new_dict, item, func) + return new_dict + + +def selective_update(a, b, func): + """ + Given two dicts and a comparison function, recursively inspects key-value + pairs in the second dict and merges them into the first dict if func() + returns a "Truthy" value. + + Example:: + + >>> a = dict(x=0, y=1, z=3) + >>> b = dict(x=1, y=2, z=0) + >>> selective_update(a, b, lambda foo, bar: foo > bar) + >>> print(a) + {'x': 1, 'y': 2, 'z': 3} + + :param a: A dict. This is modified in-place! + :param b: Another dict. + :param func: A binary comparison function that will be called similarly to: + func(a[key], b[key]) for each key in b. 
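+
+    Nested dicts are merged recursively; a minimal sketch of that case
+    (values illustrative)::
+
+        >>> a = dict(m=dict(x=0, y=5))
+        >>> b = dict(m=dict(x=1, y=2))
+        >>> selective_update(a, b, lambda foo, bar: foo > bar)
+        >>> print(a)
+        {'m': {'x': 1, 'y': 5}}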
+ """ + for key, value in b.items(): + if key not in a: + a[key] = value + continue + if isinstance(value, dict): + selective_update(a[key], value, func) + elif func(value, a[key]): + a[key] = value + + +class AuthToken(object): + time_format = '%Y-%m-%d %H:%M:%S%z' + + def __init__(self, name, directory=os.path.expanduser('~/.cache/')): + self.name = name + self.directory = directory + self.path = os.path.join(directory, name) + self.lock_path = "%s.lock" % self.path + self.expires = None + self.value = None + self.endpoint = None + + def read(self): + if not os.path.exists(self.path): + self.value = None + self.expires = None + self.endpoint = None + return + with open(self.path, 'r') as obj: + string = obj.read() + obj = json.loads(string) + self.expires = dateutil.parser.parse(obj['expires']) + if self.expired: + self.value = None + self.endpoint = None + else: + self.value = obj['value'] + self.endpoint = obj['endpoint'] + + def write(self, value, expires, endpoint): + obj = dict( + value=value, + expires=datetime.datetime.strftime(expires, self.time_format), + endpoint=endpoint, + ) + string = json.dumps(obj) + with open(self.path, 'w') as obj: + obj.write(string) + + @property + def expired(self): + if self.expires is None: + return True + utcnow = datetime.datetime.now(dateutil.tz.tzutc()) + offset = datetime.timedelta(minutes=30) + return self.expires < (utcnow + offset) + + def __enter__(self): + with FileLock(self.lock_path): + self.read() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass diff --git a/teuthology/provision/downburst.py b/teuthology/provision/downburst.py new file mode 100644 index 0000000000..17b6818b07 --- /dev/null +++ b/teuthology/provision/downburst.py @@ -0,0 +1,322 @@ +import json +import logging +import os +import subprocess +import tempfile +import yaml + +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.misc import decanonicalize_hostname +from teuthology.misc import deep_merge +from teuthology.lock import query + +log = logging.getLogger(__name__) + + +def downburst_executable(): + """ + First check for downburst in the user's path. + Then check in ~/src, ~ubuntu/src, and ~teuthology/src. + Return '' if no executable downburst is found. 
+ """ + if config.downburst: + if isinstance(config.downburst, dict): + if 'path' in config.downburst: + return config.downburst['path'] + else: + return config.downburst + path = os.environ.get('PATH', None) + if path: + for p in os.environ.get('PATH', '').split(os.pathsep): + pth = os.path.join(p, 'downburst') + if os.access(pth, os.X_OK): + return pth + import pwd + little_old_me = pwd.getpwuid(os.getuid()).pw_name + for user in [little_old_me, 'ubuntu', 'teuthology']: + pth = os.path.expanduser( + "~%s/src/downburst/virtualenv/bin/downburst" % user) + if os.access(pth, os.X_OK): + return pth + return '' + + +def downburst_environment(): + env = dict() + discover_url = os.environ.get('DOWNBURST_DISCOVER_URL') + if config.downburst and not discover_url: + if isinstance(config.downburst, dict): + discover_url = config.downburst.get('discover_url') + if discover_url: + env['DOWNBURST_DISCOVER_URL'] = discover_url + return env + + +class Downburst(object): + """ + A class that provides methods for creating and destroying virtual machine + instances using downburst: https://github.com/ceph/downburst + """ + def __init__(self, name, os_type, os_version, status=None, user='ubuntu', + logfile=None): + self.name = name + self.shortname = decanonicalize_hostname(self.name) + self.os_type = os_type + self.os_version = os_version + self.status = status or query.get_status(self.name) + self.config_path = None + self.user_path = None + self.user = user + self.logfile = logfile + self.host = decanonicalize_hostname(self.status['vm_host']['name']) + self.executable = downburst_executable() + self.environment = downburst_environment() + + def create(self): + """ + Launch a virtual machine instance. + + If creation fails because an instance with the specified name is + already running, first destroy it, then try again. This process will + repeat two more times, waiting 60s between tries, before giving up. + """ + if not self.executable: + log.error("No downburst executable found.") + return False + self.build_config() + success = None + with safe_while(sleep=60, tries=3, + action="downburst create") as proceed: + while proceed(): + (returncode, stdout, stderr) = self._run_create() + log.info(stdout) + log.info(stderr) + if returncode == 0: + log.info("Downburst created %s: %s" % (self.name, + stdout.strip())) + success = True + break + elif stderr: + # If the guest already exists first destroy then re-create: + if 'exists' in stderr: + success = False + log.info("Guest files exist. Re-creating guest: %s" % + (self.name)) + self.destroy() + else: + success = False + log.info("Downburst failed on %s: %s" % ( + self.name, stderr.strip())) + break + return success + + def _run_create(self): + """ + Used by create(), this method is what actually calls downburst when + creating a virtual machine instance. 
+ """ + if not self.config_path: + raise ValueError("I need a config_path!") + if not self.user_path: + raise ValueError("I need a user_path!") + + args = [self.executable, '-v', '-c', self.host] + if self.logfile: + args.extend(['-l', self.logfile]) + args.extend([ + 'create', + '--wait', + '--meta-data=%s' % self.config_path, + '--user-data=%s' % self.user_path, + self.shortname, + ]) + log.info("Provisioning a {distro} {distroversion} vps".format( + distro=self.os_type, + distroversion=self.os_version + )) + log.debug(args) + proc = subprocess.Popen(args, universal_newlines=True, + env=self.environment, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = proc.communicate() + return (proc.returncode, out, err) + + def destroy(self): + """ + Destroy (shutdown and delete) a virtual machine instance. + """ + executable = self.executable + if not executable: + log.error("No downburst executable found.") + return False + args = [executable, '-v', '-c', self.host] + if self.logfile: + args.extend(['-l', self.logfile]) + args.extend(['destroy', self.shortname]) + log.debug(args) + proc = subprocess.Popen(args, universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE,) + out, err = proc.communicate() + log.info(out) + log.info(err) + if proc.returncode != 0: + not_found_msg = "no domain with matching name '%s'" % self.shortname + if not_found_msg in err: + log.warning("Ignoring error during destroy: %s", err) + return True + log.error("Error destroying %s: %s", self.name, err) + return False + else: + out_str = ': %s' % out if out else '' + log.info("Destroyed %s%s" % (self.name, out_str)) + return True + + def build_config(self): + """ + Assemble a configuration to pass to downburst, and write it to a file. + """ + config_fd = tempfile.NamedTemporaryFile(delete=False, mode='wt') + + os_type = self.os_type.lower() + os_version = self.os_version.lower() + + mac_address = self.status['mac_address'] + defaults = dict( + downburst=dict( + machine=dict( + disk=os.environ.get('DOWNBURST_DISK_SIZE', '100G'), + ram=os.environ.get('DOWNBURST_RAM_SIZE', '3.8G'), + cpus=int(os.environ.get('DOWNBURST_CPUS', 1)), + volumes=dict( + count=int(os.environ.get('DOWNBURST_EXTRA_DISK_NUMBER', 4)), + size=os.environ.get('DOWNBURST_EXTRA_DISK_SIZE', '100G'), + ), + ), + ) + ) + downburst_config = defaults['downburst'] + if config.downburst and isinstance(config.downburst, dict): + deep_merge(downburst_config, config.downburst) + log.debug('downburst_config: %s', downburst_config) + machine = downburst_config['machine'] + log.debug('Using machine config: %s', machine) + file_info = { + 'disk-size': machine['disk'], + 'ram': machine['ram'], + 'cpus': machine['cpus'], + 'networks': [ + {'source': 'front', 'mac': mac_address}], + 'distro': os_type, + 'distroversion': self.os_version, + 'additional-disks': machine['volumes']['count'], + 'additional-disks-size': machine['volumes']['size'], + 'arch': 'x86_64', + } + fqdn = self.name.split('@')[1] + file_out = { + 'downburst': file_info, + 'local-hostname': fqdn, + } + yaml.safe_dump(file_out, config_fd) + self.config_path = config_fd.name + + user_info = { + 'user': self.user, + # Remove the user's password so console logins are possible + 'runcmd': [ + ['passwd', '-d', self.user], + ] + } + # for opensuse-15.2 we need to replace systemd-logger with rsyslog for teuthology + if os_type == 'opensuse' and os_version == '15.2': + user_info['runcmd'].extend([ + ['zypper', 'rm', '-y', 'systemd-logger'], + ['zypper', 'in', '-y', 'rsyslog'], + ]) + # 
Install git on downbursted VMs to clone upstream linux-firmware. + # Issue #17154 + if 'packages' not in user_info: + user_info['packages'] = list() + user_info['packages'].extend([ + 'git', + 'wget', + ]) + # On CentOS/RHEL/Fedora, write the correct mac address and + # install redhab-lsb-core for `lsb_release` + if os_type in ['centos', 'rhel', 'fedora']: + user_info['runcmd'].extend([ + ['sed', '-ie', 's/HWADDR=".*"/HWADDR="%s"/' % mac_address, + '/etc/sysconfig/network-scripts/ifcfg-eth0'], + ]) + user_info['packages'].append('redhat-lsb-core') + # On Ubuntu, starting with 16.04, and Fedora, starting with 24, we need + # to install 'python' to get python2.7, which ansible needs + if os_type in ('ubuntu', 'fedora'): + user_info['packages'].append('python') + user_fd = tempfile.NamedTemporaryFile(delete=False, mode='wt') + user_str = "#cloud-config\n" + yaml.safe_dump(user_info) + user_fd.write(user_str) + self.user_path = user_fd.name + return True + + def remove_config(self): + """ + Remove the downburst configuration file created by build_config() + """ + if self.config_path and os.path.exists(self.config_path): + os.remove(self.config_path) + self.config_path = None + return True + if self.user_path and os.path.exists(self.user_path): + os.remove(self.user_path) + self.user_path = None + return True + return False + + def __del__(self): + self.remove_config() + + +def get_distro_from_downburst(): + """ + Return a table of valid distros. + + If downburst is in path use it. If either downburst is unavailable, + or if downburst is unable to produce a json list, then use a default + table. + """ + default_table = {'rhel_minimal': ['6.4', '6.5'], + 'fedora': ['17', '18', '19', '20', '22'], + 'centos': ['6.3', '6.4', '6.5', '7.0', + '7.2', '7.4', '8.2'], + 'centos_minimal': ['6.4', '6.5'], + 'ubuntu': ['8.04(hardy)', '9.10(karmic)', + '10.04(lucid)', '10.10(maverick)', + '11.04(natty)', '11.10(oneiric)', + '12.04(precise)', '12.10(quantal)', + '13.04(raring)', '13.10(saucy)', + '14.04(trusty)', 'utopic(utopic)', + '16.04(xenial)', '18.04(bionic)', + '20.04(focal)'], + 'sles': ['12-sp3', '15-sp1', '15-sp2'], + 'opensuse': ['12.3', '15.1', '15.2'], + 'debian': ['6.0', '7.0', '8.0']} + executable_cmd = downburst_executable() + environment_dict = downburst_environment() + if not executable_cmd: + log.warning("Downburst not found!") + log.info('Using default values for supported os_type/os_version') + return default_table + try: + log.debug(executable_cmd) + output = subprocess.check_output([executable_cmd, 'list-json'], + env=environment_dict) + downburst_data = json.loads(output) + return downburst_data + except (subprocess.CalledProcessError, OSError): + log.exception("Error calling downburst!") + log.info('Using default values for supported os_type/os_version') + return default_table diff --git a/teuthology/provision/fog.py b/teuthology/provision/fog.py new file mode 100644 index 0000000000..9be85b61c8 --- /dev/null +++ b/teuthology/provision/fog.py @@ -0,0 +1,312 @@ +import json +import logging +import requests +import socket +import re + +from datetime import datetime +from paramiko import SSHException +from paramiko.ssh_exception import NoValidConnectionsError + +import teuthology.orchestra + +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.exceptions import MaxWhileTries +from teuthology import misc + +log = logging.getLogger(__name__) + + +def enabled(warn=False): + """ + Check for required FOG settings + + :param warn: Whether or not 
to log a message containing unset parameters + :returns: True if they are present; False if they are not + """ + fog_conf = config.get('fog', dict()) + params = ['endpoint', 'api_token', 'user_token', 'machine_types'] + unset = [param for param in params if not fog_conf.get(param)] + if unset and warn: + log.warning( + "FOG disabled; set the following config options to enable: %s", + ' '.join(unset), + ) + return (unset == []) + + +def get_types(): + """ + Fetch and parse config.fog['machine_types'] + + :returns: The list of FOG-configured machine types. An empty list if FOG is + not configured. + """ + if not enabled(): + return [] + fog_conf = config.get('fog', dict()) + types = fog_conf.get('machine_types', '') + if not isinstance(types, list): + types = types.split(',') + return [type_ for type_ in types if type_] + + +class FOG(object): + """ + Reimage bare-metal machines with https://fogproject.org/ + """ + timestamp_format = '%Y-%m-%d %H:%M:%S' + + def __init__(self, name, os_type, os_version): + self.remote = teuthology.orchestra.remote.Remote( + misc.canonicalize_hostname(name)) + self.name = self.remote.hostname + self.shortname = self.remote.shortname + self.os_type = os_type + self.os_version = os_version + self.log = log.getChild(self.shortname) + + def create(self): + """ + Initiate deployment and wait until completion + """ + if not enabled(): + raise RuntimeError("FOG is not configured!") + host_data = self.get_host_data() + host_id = int(host_data['id']) + self.set_image(host_id) + task_id = self.schedule_deploy_task(host_id) + try: + # Use power_off/power_on because other methods call + # _wait_for_login, which will not work here since the newly-imaged + # host will have an incorrect hostname + self.remote.console.power_off() + self.remote.console.power_on() + self.wait_for_deploy_task(task_id) + except Exception: + self.cancel_deploy_task(task_id) + raise + self._wait_for_ready() + self._fix_hostname() + self.log.info("Deploy complete!") + + def do_request(self, url_suffix, data=None, method='GET', verify=True): + """ + A convenience method to submit a request to the FOG server + :param url_suffix: The portion of the URL to append to the endpoint, + e.g. '/system/info' + :param data: Optional JSON data to submit with the request + :param method: The HTTP method to use for the request (default: 'GET') + :param verify: Whether or not to raise an exception if the request is + unsuccessful (default: True) + :returns: A requests.models.Response object + """ + req_kwargs = dict( + headers={ + 'fog-api-token': config.fog['api_token'], + 'fog-user-token': config.fog['user_token'], + }, + ) + if data is not None: + req_kwargs['data'] = data + req = requests.Request( + method, + config.fog['endpoint'] + url_suffix, + **req_kwargs + ) + prepped = req.prepare() + resp = requests.Session().send(prepped) + if not resp.ok and resp.text: + self.log.error("%s: %s", resp.status_code, resp.text) + if verify: + resp.raise_for_status() + return resp + + def get_host_data(self): + """ + Locate the host we want to use, and return the FOG object which + represents it + :returns: A dict describing the host + """ + resp = self.do_request( + '/host', + data=json.dumps(dict(name=self.shortname)), + ) + obj = resp.json() + if obj['count'] == 0: + raise RuntimeError("Host %s not found!" 
% self.shortname) + if obj['count'] > 1: + raise RuntimeError( + "More than one host found for %s" % self.shortname) + return obj['hosts'][0] + + def get_image_data(self): + """ + Locate the image we want to use, and return the FOG object which + represents it + :returns: A dict describing the image + """ + name = '_'.join([ + self.remote.machine_type, self.os_type.lower(), self.os_version]) + resp = self.do_request( + '/image', + data=json.dumps(dict(name=name)), + ) + obj = resp.json() + if not obj['count']: + raise RuntimeError( + "Could not find an image for %s %s" % + (self.os_type, self.os_version)) + return obj['images'][0] + + def set_image(self, host_id): + """ + Tell FOG to use the proper image on the next deploy + :param host_id: The id of the host to deploy + """ + image_data = self.get_image_data() + image_id = int(image_data['id']) + self.do_request( + '/host/%s' % host_id, + method='PUT', + data=json.dumps(dict(imageID=image_id)), + ) + + def schedule_deploy_task(self, host_id): + """ + :param host_id: The id of the host to deploy + :returns: The id of the scheduled task + """ + self.log.info( + "Scheduling deploy of %s %s", + self.os_type, self.os_version) + # First, let's find and cancel any existing deploy tasks for the host. + for task in self.get_deploy_tasks(): + self.cancel_deploy_task(task['id']) + # Next, we need to find the right tasktype ID + resp = self.do_request( + '/tasktype', + data=json.dumps(dict(name='deploy')), + ) + tasktypes = [obj for obj in resp.json()['tasktypes'] + if obj['name'].lower() == 'deploy'] + deploy_id = int(tasktypes[0]['id']) + # Next, schedule the task + resp = self.do_request( + '/host/%i/task' % host_id, + method='POST', + data='{"taskTypeID": %i}' % deploy_id, + ) + host_tasks = self.get_deploy_tasks() + for task in host_tasks: + timestamp = task['createdTime'] + time_delta = ( + datetime.utcnow() - datetime.strptime( + timestamp, self.timestamp_format) + ).total_seconds() + # There should only be one deploy task matching our host. Just in + # case there are multiple, select a very recent one. 
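+            # (e.g. a task created ~2 seconds before this check has a
+            # time_delta of roughly 2 and is returned; anything older than
+            # 5 seconds is treated as stale and ignored)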
+ if time_delta < 5: + return task['id'] + + def get_deploy_tasks(self): + """ + :returns: A list of deploy tasks which are active on our host + """ + resp = self.do_request('/task/active') + try: + tasks = resp.json()['tasks'] + except Exception: + self.log.exception("Failed to get deploy tasks!") + return list() + host_tasks = [obj for obj in tasks + if obj['host']['name'] == self.shortname] + return host_tasks + + def deploy_task_active(self, task_id): + """ + :param task_id: The id of the task to query + :returns: True if the task is active + """ + host_tasks = self.get_deploy_tasks() + return any( + [task['id'] == task_id for task in host_tasks] + ) + + def wait_for_deploy_task(self, task_id): + """ + Wait until the specified task is no longer active (i.e., it has + completed) + """ + self.log.info("Waiting for deploy to finish") + with safe_while(sleep=15, tries=60) as proceed: + while proceed(): + if not self.deploy_task_active(task_id): + break + + def cancel_deploy_task(self, task_id): + """ Cancel an active deploy task """ + resp = self.do_request( + '/task/cancel', + method='DELETE', + data='{"id": %s}' % task_id, + ) + resp.raise_for_status() + + def _wait_for_ready(self): + """ Attempt to connect to the machine via SSH """ + with safe_while(sleep=6, tries=100) as proceed: + while proceed(): + try: + self.remote.connect() + break + except ( + socket.error, + SSHException, + NoValidConnectionsError, + MaxWhileTries, + EOFError, + ): + pass + sentinel_file = config.fog.get('sentinel_file', None) + if sentinel_file: + cmd = "while [ ! -e '%s' ]; do sleep 5; done" % sentinel_file + self.remote.run(args=cmd, timeout=600) + self.log.info("Node is ready") + + def _fix_hostname(self): + """ + After a reimage, the host will still have the hostname of the machine + used to create the image initially. Fix that by making a call to + /binhostname and tweaking /etc/hosts. 
+ """ + wrong_hostname = self.remote.sh('hostname').strip() + etc_hosts = self.remote.sh( + 'grep %s /etc/hosts' % wrong_hostname, + check_status=False, + ).strip() + if etc_hosts: + wrong_ip = re.split(r'\s+', etc_hosts.split('\n')[0].strip())[0] + self.remote.run(args="sudo hostname %s" % self.shortname) + self.remote.run( + args="sudo sed -i -e 's/%s/%s/g' /etc/hosts" % ( + wrong_hostname, self.shortname), + ) + self.remote.run( + args="sudo sed -i -e 's/%s/%s/g' /etc/hosts" % ( + wrong_ip, self.remote.ip_address), + ) + self.remote.run( + args="sudo sed -i -e 's/%s/%s/g' /etc/hostname" % ( + wrong_hostname, self.shortname), + check_status=False, + ) + self.remote.run( + args="sudo hostname %s" % self.shortname, + check_status=False, + ) + + def destroy(self): + """A no-op; we just leave idle nodes as-is""" + pass diff --git a/teuthology/provision/openstack.py b/teuthology/provision/openstack.py new file mode 100644 index 0000000000..23066cda38 --- /dev/null +++ b/teuthology/provision/openstack.py @@ -0,0 +1,234 @@ +import json +import logging +import os +import random +import re +import subprocess +import time +import tempfile + +from subprocess import CalledProcessError + +from teuthology import misc + +from teuthology.openstack import OpenStack, OpenStackInstance +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.exceptions import QuotaExceededError + + +log = logging.getLogger(__name__) + + +class ProvisionOpenStack(OpenStack): + """ + A class that provides methods for creating and destroying virtual machine + instances using OpenStack + """ + def __init__(self): + super(ProvisionOpenStack, self).__init__() + self.user_data = tempfile.mktemp() + log.debug("ProvisionOpenStack: " + str(config.openstack)) + self.basename = 'target' + self.up_string = 'The system is finally up' + self.property = "%16x" % random.getrandbits(128) + + def __del__(self): + if os.path.exists(self.user_data): + os.unlink(self.user_data) + + def init_user_data(self, os_type, os_version): + """ + Get the user-data file that is fit for os_type and os_version. + It is responsible for setting up enough for ansible to take + over. 
+ """ + template_path = config['openstack']['user-data'].format( + os_type=os_type, + os_version=os_version) + nameserver = config['openstack'].get('nameserver', '8.8.8.8') + user_data_template = open(template_path).read() + user_data = user_data_template.format( + up=self.up_string, + nameserver=nameserver, + username=self.username, + lab_domain=config.lab_domain) + open(self.user_data, 'w').write(user_data) + + def _openstack(self, subcommand, get=None): + # do not use OpenStack().run because its + # bugous for volume create as of openstackclient 3.2.0 + # https://bugs.launchpad.net/python-openstackclient/+bug/1619726 + #r = OpenStack().run("%s -f json " % command) + json_result = misc.sh("openstack %s -f json" % subcommand) + r = json.loads(json_result) + if get: + return self.get_value(r, get) + return r + + def _create_volume(self, volume_name, size): + """ + Create a volume and return valume id + """ + volume_id = None + try: + volume_id = self._openstack("volume show %s" % volume_name, 'id') + except subprocess.CalledProcessError as e: + if 'No volume with a name or ID' not in e.output: + raise e + if volume_id: + log.warning("Volume {} already exists with ID {}; using it" + .format(volume_name, volume_id)) + volume_id = self._openstack( + "volume create %s" % config['openstack'].get('volume-create','') + + " --property ownedby=%s" % config['openstack']['ip'] + + " --size %s" % str(size) + ' ' + volume_name, 'id') + if volume_id: + log.info("Volume {} created with ID {}" + .format(volume_name, volume_id)) + return volume_id + else: + raise Exception("Failed to create volume %s" % volume_name) + + def _await_volume_status(self, volume_id, status='available'): + """ + Wait for volume to have status, like 'available' or 'in-use' + """ + with safe_while(sleep=4, tries=50, + action="volume " + volume_id) as proceed: + while proceed(): + try: + volume_status = \ + self._openstack("volume show %s" % volume_id, 'status') + if volume_status == status: + break + else: + log.debug("volume %s not in '%s' status yet" + % (volume_id, status)) + except subprocess.CalledProcessError: + log.warning("volume " + volume_id + + " not information available yet") + + def _attach_volume(self, volume_id, name): + """ + Attach volume to OpenStack instance. + + Try and attach volume to server, wait until volume gets in-use state. + """ + with safe_while(sleep=20, increment=20, tries=3, + action="add volume " + volume_id) as proceed: + while proceed(): + try: + misc.sh("openstack server add volume " + name + " " + volume_id) + break + except subprocess.CalledProcessError: + log.warning("openstack add volume failed unexpectedly; retrying") + self._await_volume_status(volume_id, 'in-use') + + def attach_volumes(self, server_name, volumes): + """ + Create and attach volumes to the named OpenStack instance. + If attachment is failed, make another try. + """ + for i in range(volumes['count']): + volume_name = server_name + '-' + str(i) + volume_id = None + with safe_while(sleep=10, tries=3, + action="volume " + volume_name) as proceed: + while proceed(): + try: + volume_id = self._create_volume(volume_name, volumes['size']) + self._await_volume_status(volume_id, 'available') + self._attach_volume(volume_id, server_name) + break + except Exception as e: + log.warning("%s" % e) + if volume_id: + OpenStack().volume_delete(volume_id) + + @staticmethod + def ip2name(prefix, ip): + """ + return the instance name suffixed with the IP address. 
+ """ + digits = map(int, re.findall('(\d+)\.(\d+)\.(\d+)\.(\d+)', ip)[0]) + return prefix + "%03d%03d%03d%03d" % tuple(digits) + + def create(self, num, os_type, os_version, arch, resources_hint): + """ + Create num OpenStack instances running os_type os_version and + return their names. Each instance has at least the resources + described in resources_hint. + """ + log.debug('ProvisionOpenStack:create') + if arch is None: + arch = self.get_default_arch() + resources_hint = self.interpret_hints({ + 'machine': config['openstack']['machine'], + 'volumes': config['openstack']['volumes'], + }, resources_hint) + self.init_user_data(os_type, os_version) + image = self.image(os_type, os_version, arch) + if 'network' in config['openstack']: + net = "--nic net-id=" + str(self.net_id(config['openstack']['network'])) + else: + net = '' + flavor = self.flavor(resources_hint['machine'], arch) + keypair = config['openstack']['keypair'] or 'teuthology' + worker_group = config['openstack']['worker_group'] or 'teuthology-worker' + cmd = ("flock --close --timeout 28800 /tmp/teuthology-server-create.lock" + + " openstack server create" + + " " + config['openstack'].get('server-create', '') + + " -f json " + + " --image '" + str(image) + "'" + + " --flavor '" + str(flavor) + "'" + + " --key-name %s " % keypair + + " --user-data " + str(self.user_data) + + " " + net + + " --min " + str(num) + + " --max " + str(num) + + " --security-group %s" % worker_group + + " --property teuthology=" + self.property + + " --property ownedby=" + config.openstack['ip'] + + " --wait " + + " " + self.basename) + try: + self.run(cmd, type='compute') + except CalledProcessError as exc: + if "quota exceeded" in exc.output.lower(): + raise QuotaExceededError(message=exc.output) + raise + instances = filter( + lambda instance: self.property in instance['Properties'], + self.list_instances()) + instances = [OpenStackInstance(i['ID']) for i in instances] + fqdns = [] + try: + network = config['openstack'].get('network', '') + for instance in instances: + ip = instance.get_ip(network) + name = self.ip2name(self.basename, ip) + self.run("server set " + + "--name " + name + " " + + instance['ID']) + fqdn = name + '.' 
+ config.lab_domain + if not misc.ssh_keyscan_wait(fqdn): + console_log = misc.sh("openstack console log show %s " + "|| true" % instance['ID']) + log.error(console_log) + raise ValueError('ssh_keyscan_wait failed for ' + fqdn) + time.sleep(15) + if not self.cloud_init_wait(instance): + raise ValueError('cloud_init_wait failed for ' + fqdn) + self.attach_volumes(name, resources_hint['volumes']) + fqdns.append(fqdn) + except Exception as e: + log.exception(str(e)) + for id in [instance['ID'] for instance in instances]: + self.destroy(id) + raise e + return fqdns + + def destroy(self, name_or_id): + log.debug('ProvisionOpenStack:destroy ' + name_or_id) + return OpenStackInstance(name_or_id).destroy() diff --git a/teuthology/provision/pelagos.py b/teuthology/provision/pelagos.py new file mode 100644 index 0000000000..5dd04a4fae --- /dev/null +++ b/teuthology/provision/pelagos.py @@ -0,0 +1,173 @@ + +import logging +import requests +import re +import time + +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.misc import canonicalize_hostname +from teuthology.util.compat import HTTPError + +log = logging.getLogger(__name__) +config_section = 'pelagos' + +# Provisioner configuration section description see in +# docs/siteconfig.rst + +def enabled(warn=False): + """ + Check for required Pelagos settings + + :param warn: Whether or not to log a message containing unset parameters + :returns: True if they are present; False if they are not + """ + conf = config.get(config_section, dict()) + params = ['endpoint', 'machine_types'] + unset = [_ for _ in params if not conf.get(_)] + if unset and warn: + log.warning( + "Pelagos is disabled; set the following config options to enable: %s", + ' '.join(unset), + ) + return (unset == []) + + +def get_types(): + """ + Fetch and parse config.pelagos['machine_types'] + + :returns: The list of Pelagos-configured machine types. An empty list if Pelagos is + not configured. 
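+
+    For example, ``machine_types: ptype1,ptype2`` in the ``pelagos`` config
+    section yields ``['ptype1', 'ptype2']``.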
+ """ + if not enabled(): + return [] + conf = config.get(config_section, dict()) + types = conf.get('machine_types', '') + if not isinstance(types, list): + types = [_ for _ in types.split(',') if _] + return [_ for _ in types if _] + +def park_node(name): + p = Pelagos(name, "maintenance_image") + p.create(wait=False) + + +class Pelagos(object): + + def __init__(self, name, os_type, os_version=""): + #for service should be a hostname, not a user@host + split_uri = re.search(r'(\w*)@(.+)', canonicalize_hostname(name)) + if split_uri is not None: + self.name = split_uri.groups()[1] + else: + self.name = name + + self.os_type = os_type + self.os_version = os_version + if os_version: + self.os_name = os_type + "-" + os_version + else: + self.os_name = os_type + self.log = log.getChild(self.name) + + def create(self, wait=True): + """ + Initiate deployment via REST requests and wait until completion + :param wait: optional, by default is True, if set to False, function + doesn't wait for the end of node provisioning + :returns: http response code if operation is successful + :raises: :class:`Exception`: if node provision failure reported by + Pelagos or if timeout is reached + :raises: :class:`RuntimeError`: if pelagos is not configured + + """ + if not enabled(): + raise RuntimeError("Pelagos is not configured!") + location = None + try: + params = dict(os=self.os_name, node=self.name) + response = self.do_request('node/provision', + data=params, method='POST') + if not wait: + return response + location = response.headers.get('Location') + self.log.debug("provision task: '%s'", location) + # gracefully wait till provision task gets created on pelagos + time.sleep(2) + self.log.info("Waiting for deploy to finish") + sleep_time=15 + with safe_while(sleep=sleep_time, tries=60) as proceed: + while proceed(): + if not self.is_task_active(location): + break + self.log.info('Sleeping %s seconds' % sleep_time) + except Exception as e: + if location: + self.cancel_deploy_task(location) + else: + self.log.error("Failed to start deploy tasks!") + raise e + self.log.info("Deploy complete!") + if self.task_status_response.status_code != 200: + raise Exception("Deploy failed") + return self.task_status_response + + def cancel_deploy_task(self, task_id): + # TODO implement it + return + + def is_task_active(self, task_url): + try: + status_response = self.do_request('', url=task_url, verify=False) + except HTTPError as err: + self.log.error("Task fail reason: '%s'", err.reason) + if err.status_code == 404: + self.log.error(err.reason) + self.task_status_response = 'failed' + return False + else: + raise HTTPError(err.code, err.reason) + self.log.debug("Response code '%s'", + str(status_response.status_code)) + self.task_status_response = status_response + if status_response.status_code == 202: + status = status_response.headers['status'] + self.log.debug("Status response: '%s'", status) + if status == 'not completed': + return True + return False + + def do_request(self, url_suffix, url="" , data=None, method='GET', verify=True): + """ + A convenience method to submit a request to the Pelagos server + :param url_suffix: The portion of the URL to append to the endpoint, + e.g. 
'/system/info' + :param data: Optional JSON data to submit with the request + :param method: The HTTP method to use for the request (default: 'GET') + :param verify: Whether or not to raise an exception if the request is + unsuccessful (default: True) + :returns: A requests.models.Response object + """ + prepared_url = url or config.pelagos['endpoint'] + url_suffix + self.log.debug("Sending %s request to: '%s'", method, prepared_url) + if data: + self.log.debug("Using data: '%s'", str(data)) + req = requests.Request( + method, + prepared_url, + data=data + ) + prepared = req.prepare() + resp = requests.Session().send(prepared) + if not resp.ok and resp.text: + self.log.error("Returned status code: '%s', text: %s", + resp.status_code, resp.text or 'Empty') + if verify: + resp.raise_for_status() + return resp + + def destroy(self): + """A no-op; we just leave idle nodes as-is""" + pass + diff --git a/teuthology/provision/test/test_downburst.py b/teuthology/provision/test/test_downburst.py new file mode 100644 index 0000000000..cc947b2850 --- /dev/null +++ b/teuthology/provision/test/test_downburst.py @@ -0,0 +1,105 @@ +from mock import Mock, MagicMock, patch + +from teuthology import provision + + +class TestDownburst(object): + def setup(self): + self.ctx = Mock() + self.ctx.os_type = 'rhel' + self.ctx.os_version = '7.0' + self.ctx.config = dict() + self.name = 'vpm999' + self.status = dict( + vm_host=dict(name='host999'), + is_vm=True, + machine_type='mtype', + ) + + def test_create_if_vm_success(self): + name = self.name + ctx = self.ctx + status = self.status + + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + dbrst.executable = '/fake/path' + dbrst.build_config = MagicMock(name='build_config') + dbrst._run_create = MagicMock(name='_run_create') + dbrst._run_create.return_value = (0, '', '') + remove_config = MagicMock(name='remove_config') + dbrst.remove_config = remove_config + + result = provision.create_if_vm(ctx, name, dbrst) + assert result is True + + dbrst._run_create.assert_called_with() + dbrst.build_config.assert_called_with() + del dbrst + remove_config.assert_called_with() + + def test_destroy_if_vm_success(self): + name = self.name + ctx = self.ctx + status = self.status + + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + dbrst.destroy = MagicMock(name='destroy') + dbrst.destroy.return_value = True + + result = provision.destroy_if_vm(ctx, name, _downburst=dbrst) + assert result is True + + dbrst.destroy.assert_called_with() + + def test_destroy_if_vm_wrong_owner(self): + name = self.name + ctx = self.ctx + status = self.status + status['locked_by'] = 'user@a' + + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + dbrst.destroy = MagicMock(name='destroy', side_effect=RuntimeError) + + result = provision.destroy_if_vm(ctx, name, user='user@b', + _downburst=dbrst) + assert result is False + + def test_destroy_if_vm_wrong_description(self): + name = self.name + ctx = self.ctx + status = self.status + status['description'] = 'desc_a' + + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + dbrst.destroy = MagicMock(name='destroy') + dbrst.destroy = MagicMock(name='destroy', side_effect=RuntimeError) + + result = provision.destroy_if_vm(ctx, name, description='desc_b', + _downburst=dbrst) + assert result is False + + @patch('teuthology.provision.downburst.downburst_executable') + def test_create_fails_without_executable(self, m_exec): + 
name = self.name + ctx = self.ctx + status = self.status + m_exec.return_value = '' + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + result = dbrst.create() + assert result is False + + @patch('teuthology.provision.downburst.downburst_executable') + def test_destroy_fails_without_executable(self, m_exec): + name = self.name + ctx = self.ctx + status = self.status + m_exec.return_value = '' + dbrst = provision.downburst.Downburst( + name, ctx.os_type, ctx.os_version, status) + result = dbrst.destroy() + assert result is False diff --git a/teuthology/provision/test/test_fog.py b/teuthology/provision/test/test_fog.py new file mode 100644 index 0000000000..9119f74f1e --- /dev/null +++ b/teuthology/provision/test/test_fog.py @@ -0,0 +1,317 @@ +from copy import deepcopy +from datetime import datetime +from mock import patch, DEFAULT, PropertyMock +from pytest import raises, mark + +from teuthology.config import config +from teuthology.exceptions import MaxWhileTries, CommandFailedError +from teuthology.provision import fog + + +test_config = dict(fog=dict( + endpoint='http://fog.example.com/fog', + api_token='API_TOKEN', + user_token='USER_TOKEN', + machine_types='type1,type2', +)) + + +class TestFOG(object): + klass = fog.FOG + + def setup(self): + config.load() + config.update(deepcopy(test_config)) + self.start_patchers() + + def start_patchers(self): + self.patchers = dict() + self.patchers['m_sleep'] = patch( + 'time.sleep', + ) + self.patchers['m_requests_Session_send'] = patch( + 'requests.Session.send', + ) + self.patchers['m_Remote_connect'] = patch( + 'teuthology.orchestra.remote.Remote.connect' + ) + self.patchers['m_Remote_run'] = patch( + 'teuthology.orchestra.remote.Remote.run' + ) + self.patchers['m_Remote_console'] = patch( + 'teuthology.orchestra.remote.Remote.console', + new_callable=PropertyMock, + ) + self.patchers['m_Remote_hostname'] = patch( + 'teuthology.orchestra.remote.Remote.hostname', + new_callable=PropertyMock, + ) + self.patchers['m_Remote_machine_type'] = patch( + 'teuthology.orchestra.remote.Remote.machine_type', + new_callable=PropertyMock, + ) + self.mocks = dict() + for name, patcher in self.patchers.items(): + self.mocks[name] = patcher.start() + + def teardown(self): + for patcher in self.patchers.values(): + patcher.stop() + + @mark.parametrize('enabled', [True, False]) + def test_get_types(self, enabled): + with patch('teuthology.provision.fog.enabled') as m_enabled: + m_enabled.return_value = enabled + types = fog.get_types() + if enabled: + assert types == test_config['fog']['machine_types'].split(',') + else: + assert types == [] + + def test_disabled(self): + config.fog['endpoint'] = None + obj = self.klass('name.fqdn', 'type', '1.0') + with raises(RuntimeError): + obj.create() + + def test_init(self): + self.mocks['m_Remote_hostname'].return_value = 'name.fqdn' + obj = self.klass('name.fqdn', 'type', '1.0') + assert obj.name == 'name.fqdn' + assert obj.shortname == 'name' + assert obj.os_type == 'type' + assert obj.os_version == '1.0' + + @mark.parametrize('success', [True, False]) + def test_create(self, success): + self.mocks['m_Remote_hostname'].return_value = 'name.fqdn' + self.mocks['m_Remote_machine_type'].return_value = 'type1' + obj = self.klass('name.fqdn', 'type', '1.0') + host_id = 99 + with patch.multiple( + 'teuthology.provision.fog.FOG', + get_host_data=DEFAULT, + set_image=DEFAULT, + schedule_deploy_task=DEFAULT, + wait_for_deploy_task=DEFAULT, + cancel_deploy_task=DEFAULT, + 
_wait_for_ready=DEFAULT, + _fix_hostname=DEFAULT, + ) as local_mocks: + local_mocks['get_host_data'].return_value = dict(id=host_id) + if not success: + local_mocks['wait_for_deploy_task'].side_effect = RuntimeError + with raises(RuntimeError): + obj.create() + else: + obj.create() + assert local_mocks['get_host_data'].called_once_with() + assert local_mocks['set_image'].called_once_with(host_id) + assert local_mocks['schedule_deploy_task']\ + .called_once_with(host_id) + assert local_mocks['wait_for_deploy_task'].called_once_with() + if success: + assert local_mocks['_wait_for_ready'].called_once_with() + assert local_mocks['_fix_hostname'].called_once_with() + else: + assert len(local_mocks['cancel_deploy_task'].call_args_list) == 1 + assert self.mocks['m_Remote_console']\ + .return_value.power_off.called_once_with() + assert self.mocks['m_Remote_console']\ + .return_value.power_on.called_once_with() + + def test_do_request(self): + obj = self.klass('name.fqdn', 'type', '1.0') + obj.do_request('test_url', data='DATA', method='GET') + assert len(self.mocks['m_requests_Session_send'].call_args_list) == 1 + req = self.mocks['m_requests_Session_send'].call_args_list[0][0][0] + assert req.url == test_config['fog']['endpoint'] + 'test_url' + assert req.method == 'GET' + assert req.headers['fog-api-token'] == test_config['fog']['api_token'] + assert req.headers['fog-user-token'] == test_config['fog']['user_token'] + assert req.body == 'DATA' + + @mark.parametrize( + 'count', + [0, 1, 2], + ) + def test_get_host_data(self, count): + host_objs = [dict(id=i) for i in range(count)] + resp_obj = dict(count=count, hosts=host_objs) + self.mocks['m_requests_Session_send']\ + .return_value.json.return_value = resp_obj + obj = self.klass('name.fqdn', 'type', '1.0') + if count != 1: + with raises(RuntimeError): + result = obj.get_host_data() + return + result = obj.get_host_data() + assert len(self.mocks['m_requests_Session_send'].call_args_list) == 1 + req = self.mocks['m_requests_Session_send'].call_args_list[0][0][0] + assert req.url == test_config['fog']['endpoint'] + '/host' + assert req.body == '{"name": "name"}' + assert result == host_objs[0] + + @mark.parametrize( + 'count', + [0, 1, 2], + ) + def test_get_image_data(self, count): + img_objs = [dict(id=i) for i in range(count)] + resp_obj = dict(count=count, images=img_objs) + self.mocks['m_requests_Session_send']\ + .return_value.json.return_value = resp_obj + self.mocks['m_Remote_machine_type'].return_value = 'type1' + obj = self.klass('name.fqdn', 'windows', 'xp') + if count < 1: + with raises(RuntimeError): + result = obj.get_image_data() + return + result = obj.get_image_data() + assert len(self.mocks['m_requests_Session_send'].call_args_list) == 1 + req = self.mocks['m_requests_Session_send'].call_args_list[0][0][0] + assert req.url == test_config['fog']['endpoint'] + '/image' + assert req.body == '{"name": "type1_windows_xp"}' + assert result == img_objs[0] + + def test_set_image(self): + self.mocks['m_Remote_hostname'].return_value = 'name.fqdn' + self.mocks['m_Remote_machine_type'].return_value = 'type1' + host_id = 999 + obj = self.klass('name.fqdn', 'type', '1.0') + with patch.multiple( + 'teuthology.provision.fog.FOG', + get_image_data=DEFAULT, + do_request=DEFAULT, + ) as local_mocks: + local_mocks['get_image_data'].return_value = dict(id='13') + obj.set_image(host_id) + assert local_mocks['do_request'].called_once_with( + '/host/999', 'put', '{"imageID": "13"}', + ) + + def test_schedule_deploy_task(self): + host_id = 12 + 
tasktype_id = 6 + task_id = 5 + tasktype_result = dict(tasktypes=[dict(name='deploy', id=tasktype_id)]) + schedule_result = dict() + host_tasks = [dict( + createdTime=datetime.strftime( + datetime.utcnow(), self.klass.timestamp_format), + id=task_id, + )] + self.mocks['m_requests_Session_send']\ + .return_value.json.side_effect = [ + tasktype_result, schedule_result, + ] + with patch.multiple( + 'teuthology.provision.fog.FOG', + get_deploy_tasks=DEFAULT, + ) as local_mocks: + local_mocks['get_deploy_tasks'].return_value = host_tasks + obj = self.klass('name.fqdn', 'type', '1.0') + result = obj.schedule_deploy_task(host_id) + assert local_mocks['get_deploy_tasks'].called_once_with() + assert len(self.mocks['m_requests_Session_send'].call_args_list) == 3 + assert result == task_id + + def test_get_deploy_tasks(self): + obj = self.klass('name.fqdn', 'type', '1.0') + resp_obj = dict( + count=2, + tasks=[ + dict(host=dict(name='notme')), + dict(host=dict(name='name')), + ] + ) + self.mocks['m_requests_Session_send']\ + .return_value.json.return_value = resp_obj + result = obj.get_deploy_tasks() + assert result[0]['host']['name'] == 'name' + + @mark.parametrize( + 'active_ids', + [ + [2, 4, 6, 8], + [1], + [], + ] + ) + def test_deploy_task_active(self, active_ids): + our_task_id = 4 + result_objs = [dict(id=task_id) for task_id in active_ids] + obj = self.klass('name.fqdn', 'type', '1.0') + with patch.multiple( + 'teuthology.provision.fog.FOG', + get_deploy_tasks=DEFAULT, + ) as local_mocks: + local_mocks['get_deploy_tasks'].return_value = result_objs + result = obj.deploy_task_active(our_task_id) + assert result is (our_task_id in active_ids) + + @mark.parametrize( + 'tries', + [3, 61], + ) + def test_wait_for_deploy_task(self, tries): + wait_results = [True for i in range(tries)] + [False] + obj = self.klass('name.fqdn', 'type', '1.0') + with patch.multiple( + 'teuthology.provision.fog.FOG', + deploy_task_active=DEFAULT, + ) as local_mocks: + local_mocks['deploy_task_active'].side_effect = wait_results + if tries >= 60: + with raises(MaxWhileTries): + obj.wait_for_deploy_task(9) + return + obj.wait_for_deploy_task(9) + assert len(local_mocks['deploy_task_active'].call_args_list) == \ + tries + 1 + + def test_cancel_deploy_task(self): + obj = self.klass('name.fqdn', 'type', '1.0') + with patch.multiple( + 'teuthology.provision.fog.FOG', + do_request=DEFAULT, + ) as local_mocks: + obj.cancel_deploy_task(10) + assert local_mocks['do_request'].called_once_with( + '/task/cancel', + method='DELETE', + data='{"id": 10}', + ) + + @mark.parametrize( + 'tries', + [1, 101], + ) + def test_wait_for_ready_tries(self, tries): + connect_results = [MaxWhileTries for i in range(tries)] + [True] + obj = self.klass('name.fqdn', 'type', '1.0') + self.mocks['m_Remote_connect'].side_effect = connect_results + if tries >= 100: + with raises(MaxWhileTries): + obj._wait_for_ready() + return + obj._wait_for_ready() + assert len(self.mocks['m_Remote_connect'].call_args_list) == tries + 1 + + @mark.parametrize( + 'sentinel_present', + ([False, True]), + ) + def test_wait_for_ready_sentinel(self, sentinel_present): + config.fog['sentinel_file'] = '/a_file' + obj = self.klass('name.fqdn', 'type', '1.0') + if not sentinel_present: + self.mocks['m_Remote_run'].side_effect = [ + CommandFailedError(command='cmd', exitstatus=1)] + with raises(CommandFailedError): + obj._wait_for_ready() + else: + obj._wait_for_ready() + assert len(self.mocks['m_Remote_run'].call_args_list) == 1 + assert "'/a_file'" in \ + 
self.mocks['m_Remote_run'].call_args_list[0][1]['args'] diff --git a/teuthology/provision/test/test_init_provision.py b/teuthology/provision/test/test_init_provision.py new file mode 100644 index 0000000000..390385037a --- /dev/null +++ b/teuthology/provision/test/test_init_provision.py @@ -0,0 +1,46 @@ +from copy import deepcopy +from pytest import raises +from teuthology.config import config + +import teuthology.provision + +test_config = dict( + pelagos=dict( + endpoint='http://pelagos.example:5000/', + machine_types='ptype1,ptype2,common_type', + ), + fog=dict( + endpoint='http://fog.example.com/fog', + api_token='API_TOKEN', + user_token='USER_TOKEN', + machine_types='ftype1,ftype2,common_type', + ) +) + +class TestInitProvision(object): + + def setup(self): + config.load(deepcopy(test_config)) + + def test_get_reimage_types(self): + reimage_types = teuthology.provision.get_reimage_types() + assert reimage_types == ["ptype1", "ptype2", "common_type", + "ftype1", "ftype2", "common_type"] + + def test_reimage(self): + class context: + pass + ctx = context() + ctx.os_type = 'sle' + ctx.os_version = '15.1' + with raises(Exception) as e_info: + teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'not-defined-type') + e_str = str(e_info) + print("Caught exception: " + e_str) + assert e_str.find("configured\sprovisioners") == -1 + + with raises(Exception) as e_info: + teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'common_type') + e_str = str(e_info) + print("Caught exception: " + e_str) + assert e_str.find("used\swith\sone\sprovisioner\sonly") == -1 diff --git a/teuthology/provision/test/test_pelagos.py b/teuthology/provision/test/test_pelagos.py new file mode 100644 index 0000000000..a8969d4b4f --- /dev/null +++ b/teuthology/provision/test/test_pelagos.py @@ -0,0 +1,46 @@ +from copy import deepcopy +from pytest import raises +from teuthology.config import config +from teuthology.provision import pelagos + +import teuthology.provision + + +test_config = dict( + pelagos=dict( + endpoint='http://pelagos.example:5000/', + machine_types='ptype1,ptype2', + ), +) + +class TestPelagos(object): + + def setup(self): + config.load(deepcopy(test_config)) + + def teardown(self): + pass + + def test_get_types(self): + #klass = pelagos.Pelagos + types = pelagos.get_types() + assert types == ["ptype1", "ptype2"] + + def test_disabled(self): + config.pelagos['endpoint'] = None + enabled = pelagos.enabled() + assert enabled == False + + def test_pelagos(self): + class context: + pass + + ctx = context() + ctx.os_type ='sle' + ctx.os_version = '15.1' + with raises(Exception) as e_info: + teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'ptype1') + e_str = str(e_info) + print("Caught exception: " + e_str) + assert e_str.find("Name\sor\sservice\snot\sknown") == -1 + diff --git a/teuthology/prune.py b/teuthology/prune.py new file mode 100644 index 0000000000..dc720cb1ea --- /dev/null +++ b/teuthology/prune.py @@ -0,0 +1,237 @@ +import gzip +import logging +import os +import shutil +import time + +import teuthology +from teuthology.contextutil import safe_while + +log = logging.getLogger(__name__) + + +# If we see this in any directory, we do not prune it +PRESERVE_FILE = '.preserve' + + +def main(args): + """ + Main function; parses args and calls prune_archive() + """ + verbose = args['--verbose'] + if verbose: + teuthology.log.setLevel(logging.DEBUG) + archive_dir = args['--archive'] + dry_run = args['--dry-run'] + pass_days = int(args['--pass']) + fail_days = int(args['--fail']) + remotes_days = 
int(args['--remotes']) + compress_days = int(args['--compress']) + + prune_archive( + archive_dir, pass_days, fail_days, remotes_days, compress_days, dry_run + ) + + +def prune_archive( + archive_dir, + pass_days, + fail_days, + remotes_days, + compress_days, + dry_run=False, +): + """ + Walk through the archive_dir, calling the cleanup functions to process + directories that might be old enough + """ + min_days = min(filter( + lambda n: n >= 0, [pass_days, fail_days, remotes_days])) + log.debug("Archive {archive} has {count} children".format( + archive=archive_dir, count=len(os.listdir(archive_dir)))) + # Use full paths + children = [os.path.join(archive_dir, p) for p in listdir(archive_dir)] + run_dirs = list() + for child in children: + # Ensure that the path is not a symlink, is a directory, and is old + # enough to process + if (not os.path.islink(child) and os.path.isdir(child) and + is_old_enough(child, min_days)): + run_dirs.append(child) + run_dirs.sort(key=lambda p: os.path.getctime(p), reverse=True) + for run_dir in run_dirs: + log.debug("Processing %s ..." % run_dir) + maybe_remove_jobs(run_dir, pass_days, fail_days, dry_run) + maybe_remove_remotes(run_dir, remotes_days, dry_run) + maybe_compress_logs(run_dir, compress_days, dry_run) + + +def listdir(path): + with safe_while(sleep=1, increment=1, tries=10) as proceed: + while proceed(): + try: + return os.listdir(path) + except OSError: + log.exception("Failed to list %s !" % path) + + +def should_preserve(dir_name): + """ + Should the directory be preserved? + + :returns: True if the directory contains a file named '.preserve'; False + otherwise + """ + preserve_path = os.path.join(dir_name, PRESERVE_FILE) + if os.path.isdir(dir_name) and os.path.exists(preserve_path): + return True + return False + + +def is_old_enough(file_name, days): + """ + :returns: True if the file's modification date is earlier than the amount + of days specified + """ + if days < 0: + return False + now = time.time() + secs_to_days = lambda s: s / (60 * 60 * 24) + age = now - os.path.getmtime(file_name) + if secs_to_days(age) > days: + return True + return False + + +def remove(path): + """ + Attempt to recursively remove a directory. If an OSError is encountered, + log it and continue. + """ + try: + shutil.rmtree(path) + except OSError: + log.exception("Failed to remove %s !" % path) + + +def maybe_remove_jobs(run_dir, pass_days, fail_days, dry_run=False): + """ + Remove entire job log directories if they are old enough and the job passed + """ + if pass_days < 0 and fail_days < 0: + return + contents = listdir(run_dir) + if PRESERVE_FILE in contents: + return + for child in contents: + job_path = os.path.join(run_dir, child) + # Ensure the path isn't marked for preservation and that it is a + # directory + if should_preserve(job_path) or not os.path.isdir(job_path): + continue + # Is it a job dir? 
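+ # Only directories containing a summary.yaml are treated as job
+ # directories; everything else under the run directory is skipped.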
+ summary_path = os.path.join(job_path, 'summary.yaml') + if not os.path.exists(summary_path): + continue + # Depending on whether it passed or failed, we have a different age + # threshold + summary_lines = [line.strip() for line in + open(summary_path).readlines()] + if 'success: true' in summary_lines: + status = 'passed' + days = pass_days + elif 'success: false' in summary_lines: + status = 'failed' + days = fail_days + else: + continue + # Ensure the directory is old enough to remove + if not is_old_enough(summary_path, days): + continue + log.info("{job} is a {days}-day old {status} job; removing".format( + job=job_path, days=days, status=status)) + if not dry_run: + remove(job_path) + + +def maybe_remove_remotes(run_dir, days, dry_run=False): + """ + Remove remote logs (not teuthology logs) from job directories if they are + old enough + """ + if days < 0: + return + contents = listdir(run_dir) + subdirs = dict( + remote='remote logs', + data='mon data', + ) + if PRESERVE_FILE in contents: + return + for child in contents: + item = os.path.join(run_dir, child) + # Ensure the path isn't marked for preservation, that it is a + # directory, and that it is old enough + if (should_preserve(item) or not os.path.isdir(item) or not + is_old_enough(item, days)): + continue + for (subdir, description) in subdirs.items(): + _maybe_remove_subdir(item, subdir, days, description, dry_run) + + +def _maybe_remove_subdir(job_dir, subdir, days, description, dry_run=False): + # Does the subdir exist? + subdir_path = os.path.join(job_dir, subdir) + if not os.path.isdir(subdir_path): + return + log.info("{job} is {days} days old; removing {desc}".format( + job=job_dir, + days=days, + desc=description, + )) + if not dry_run: + remove(subdir_path) + + +def maybe_compress_logs(run_dir, days, dry_run=False): + if days < 0: + return + contents = listdir(run_dir) + if PRESERVE_FILE in contents: + return + for child in contents: + item = os.path.join(run_dir, child) + # Ensure the path isn't marked for preservation, that it is a + # directory, and that it is old enough + if (should_preserve(item) or not os.path.isdir(item) or not + is_old_enough(item, days)): + continue + log_name = 'teuthology.log' + log_path = os.path.join(item, log_name) + if not os.path.exists(log_path): + continue + log.info("{job} is {days} days old; compressing {name}".format( + job=item, + days=days, + name=log_name, + )) + if dry_run: + continue + zlog_path = log_path + '.gz' + try: + _compress(log_path, zlog_path) + except Exception: + log.exception("Failed to compress %s", log_path) + os.remove(zlog_path) + else: + os.remove(log_path) + + +def _compress(in_path, out_path): + """ + Compresses a file using gzip, preserving the original permissions, atime, + and mtime. Does not remove the original. 
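+ Callers (e.g. maybe_compress_logs()) remove the original themselves
+ once compression has succeeded.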
+ """ + with open(in_path, 'rb') as src, gzip.open(out_path, 'wb') as dest: + shutil.copyfileobj(src, dest) + shutil.copystat(in_path, out_path) diff --git a/teuthology/reimage.py b/teuthology/reimage.py new file mode 100644 index 0000000000..fdc90543a6 --- /dev/null +++ b/teuthology/reimage.py @@ -0,0 +1,57 @@ +import argparse +import logging + +import teuthology + +from teuthology.parallel import parallel +from teuthology.provision import reimage, get_reimage_types +from teuthology.lock import query, ops +from teuthology.misc import get_user +from teuthology.misc import decanonicalize_hostname as shortname + +log = logging.getLogger(__name__) + +def main(args): + if (args['--verbose']): + teuthology.log.setLevel(logging.DEBUG) + + ctx = argparse.Namespace() + ctx.os_type = args['--os-type'] + ctx.os_version = args['--os-version'] + + nodes = args[''] + + reimage_types = get_reimage_types() + statuses = query.get_statuses(nodes) + owner = args['--owner'] or get_user() + unlocked = [shortname(_['name']) + for _ in statuses if not _['locked']] + if unlocked: + log.error( + "Some of the nodes are not locked: %s", unlocked) + exit(1) + + improper = [shortname(_['name']) for _ in statuses if _['locked_by'] != owner] + if improper: + log.error( + "Some of the nodes are not owned by '%s': %s", owner, improper) + exit(1) + + irreimageable = [shortname(_['name']) for _ in statuses + if _['machine_type'] not in reimage_types] + if irreimageable: + log.error( + "Following nodes cannot be reimaged because theirs machine type " + "is not reimageable: %s", irreimageable) + exit(1) + + def reimage_node(ctx, machine_name, machine_type): + ops.update_nodes([machine_name], True) + reimage(ctx, machine_name, machine_type) + ops.update_nodes([machine_name]) + log.debug("Node '%s' reimaging is complete", machine_name) + + with parallel() as p: + for node in statuses: + log.debug("Start node '%s' reimaging", node['name']) + p.spawn(reimage_node, ctx, shortname(node['name']), node['machine_type']) diff --git a/teuthology/repo_utils.py b/teuthology/repo_utils.py new file mode 100644 index 0000000000..ffccc00f3a --- /dev/null +++ b/teuthology/repo_utils.py @@ -0,0 +1,461 @@ +import logging +import os +import re +import shutil +import subprocess +import time + +from teuthology import misc +from teuthology.util.flock import FileLock +from teuthology.config import config +from teuthology.contextutil import MaxWhileTries, safe_while +from teuthology.exceptions import BootstrapError, BranchNotFoundError, CommitNotFoundError, GitError + +log = logging.getLogger(__name__) + + +# Repos must not have been fetched in the last X seconds to get fetched again. +# Similar for teuthology's bootstrap +FRESHNESS_INTERVAL = 60 + + +def touch_file(path): + out = subprocess.check_output(('touch', path)) + if out: + log.info(out) + + +def is_fresh(path): + """ + Has this file been modified in the last FRESHNESS_INTERVAL seconds? 
+ + Returns False if the file does not exist + """ + if not os.path.exists(path): + return False + elif time.time() - os.stat(path).st_mtime < FRESHNESS_INTERVAL: + return True + return False + + +def build_git_url(project, project_owner='ceph'): + """ + Return the git URL to clone the project + """ + if project == 'ceph-qa-suite': + base = config.get_ceph_qa_suite_git_url() + elif project == 'ceph-cm-ansible': + base = config.get_ceph_cm_ansible_git_url() + elif project == 'ceph': + base = config.get_ceph_git_url() + else: + base = 'https://github.com/{project_owner}/{project}' + url_templ = re.sub('\.git$', '', base) + return url_templ.format(project_owner=project_owner, project=project) + + +def ls_remote(url, ref): + """ + Return the current sha1 for a given repository and ref + + :returns: The sha1 if found; else None + """ + sha1 = None + cmd = "git ls-remote {} {}".format(url, ref) + result = subprocess.check_output( + cmd, shell=True).split() + if result: + sha1 = result[0].decode() + log.debug("{} -> {}".format(cmd, sha1)) + return sha1 + + +def current_branch(path: str) -> str: + """ + Return the current branch for a given on-disk repository. + + :returns: the current branch, or an empty string if none is found. + """ + # git branch --show-current was added in 2.22.0, and we can't assume + # our version is new enough. + cmd = "git rev-parse --abbrev-ref HEAD" + result = subprocess.Popen( + cmd, + shell=True, + cwd=path, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ).communicate()[0].strip().decode() + if result == "HEAD": + return "" + return result + + +def enforce_repo_state(repo_url, dest_path, branch, commit=None, remove_on_error=True): + """ + Use git to either clone or update a given repo, forcing it to switch to the + specified branch. + + :param repo_url: The full URL to the repo (not including the branch) + :param dest_path: The full path to the destination directory + :param branch: The branch. + :param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch. + :param remove_on_error: Whether or not to remove dest_dir when an error occurs + :raises: BranchNotFoundError if the branch is not found; + CommitNotFoundError if the commit is not found; + GitError for other errors + """ + validate_branch(branch) + sentinel = os.path.join(dest_path, '.fetched') + # sentinel to track whether the repo has checked out the intended + # version, in addition to being cloned + repo_reset = os.path.join(dest_path, '.fetched_and_reset') + try: + if not os.path.isdir(dest_path): + clone_repo(repo_url, dest_path, branch, shallow=commit is None) + elif not commit and not is_fresh(sentinel): + set_remote(dest_path, repo_url) + fetch_branch(dest_path, branch) + touch_file(sentinel) + else: + log.info("%s was just updated or references a specific commit; assuming it is current", dest_path) + + if commit and os.path.exists(repo_reset): + return + + reset_repo(repo_url, dest_path, branch, commit) + touch_file(repo_reset) + # remove_pyc_files(dest_path) + except (BranchNotFoundError, CommitNotFoundError): + if remove_on_error: + shutil.rmtree(dest_path, ignore_errors=True) + raise + + +def clone_repo(repo_url, dest_path, branch, shallow=True): + """ + Clone a repo into a path + + :param repo_url: The full URL to the repo (not including the branch) + :param dest_path: The full path to the destination directory + :param branch: The branch. 
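+ Refs such as 'refs/heads/*' or 'refs/pull/*' are also accepted, in
+ which case the clone is delegated to clone_repo_ref().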
+ :param shallow: Whether to perform a shallow clone (--depth 1) + :raises: BranchNotFoundError if the branch is not found; + GitError for other errors + """ + validate_branch(branch) + log.info("Cloning %s %s from upstream", repo_url, branch) + if branch.startswith('refs/'): + clone_repo_ref(repo_url, dest_path, branch) + return + args = ['git', 'clone', '--single-branch'] + if shallow: + args.extend(['--depth', '1']) + args.extend(['--branch', branch, repo_url, dest_path]) + proc = subprocess.Popen( + args, + cwd=os.path.dirname(dest_path), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + not_found_str = "Remote branch %s not found" % branch + out = proc.stdout.read().decode() + result = proc.wait() + # Newer git versions will bail if the branch is not found, but older ones + # will not. Fortunately they both output similar text. + if result != 0: + log.error(out) + if not_found_str in out: + if result == 0: + # Old git left a repo with the wrong branch. Remove it. + shutil.rmtree(dest_path, ignore_errors=True) + raise BranchNotFoundError(branch, repo_url) + elif result != 0: + # Unknown error + raise GitError("git clone failed!") + + +def rsstrip(s, suffix): + return s[:-len(suffix)] if s.endswith(suffix) else s + + +def lsstrip(s, prefix): + return s[len(prefix):] if s.startswith(prefix) else s + + +def remote_ref_from_ref(ref, remote='origin'): + if ref.startswith('refs/pull/'): + return 'refs/remotes/' + remote + lsstrip(ref, 'refs') + elif ref.startswith('refs/heads/'): + return 'refs/remotes/' + remote + lsstrip(ref, 'refs/heads') + raise GitError("Unsupported ref '%s'" % ref) + + +def local_branch_from_ref(ref): + if ref.startswith('refs/pull/'): + s = lsstrip(ref, 'refs/pull/') + s = rsstrip(s, '/merge') + s = rsstrip(s, '/head') + return "PR#%s" % s + elif ref.startswith('refs/heads/'): + return lsstrip(ref, 'refs/heads/') + raise GitError("Unsupported ref '%s', try 'refs/heads/' or 'refs/pull/'" % ref) + + +def fetch_refspec(ref): + if '/' in ref: + remote_ref = remote_ref_from_ref(ref) + return "+%s:%s" % (ref, remote_ref) + else: + # looks like a branch name + return ref + + +def clone_repo_ref(repo_url, dest_path, ref): + branch_name = local_branch_from_ref(ref) + remote_ref = remote_ref_from_ref(ref) + misc.sh('git init %s' % dest_path) + misc.sh('git remote add origin %s' % repo_url, cwd=dest_path) + #misc.sh('git fetch --depth 1 origin %s' % fetch_refspec(ref), + # cwd=dest_path) + fetch_branch(dest_path, ref) + misc.sh('git checkout -b %s %s' % (branch_name, remote_ref), + cwd=dest_path) + + +def set_remote(repo_path, repo_url): + """ + Call "git remote set-url origin " + + :param repo_url: The full URL to the repo (not including the branch) + :param repo_path: The full path to the repository + :raises: GitError if the operation fails + """ + log.debug("Setting repo remote to %s", repo_url) + proc = subprocess.Popen( + ('git', 'remote', 'set-url', 'origin', repo_url), + cwd=repo_path, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + if proc.wait() != 0: + out = proc.stdout.read() + log.error(out) + raise GitError("git remote set-url failed!") + + +def fetch(repo_path): + """ + Call "git fetch -p origin" + + :param repo_path: The full path to the repository + :raises: GitError if the operation fails + """ + log.info("Fetching from upstream into %s", repo_path) + proc = subprocess.Popen( + ('git', 'fetch', '-p', 'origin'), + cwd=repo_path, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + if proc.wait() != 0: + out = proc.stdout.read().decode() + 
log.error(out) + raise GitError("git fetch failed!") + + +def fetch_branch(repo_path, branch, shallow=True): + """ + Call "git fetch -p origin " + + :param repo_path: The full path to the repository on-disk + :param branch: The branch. + :param shallow: Whether to perform a shallow fetch (--depth 1) + :raises: BranchNotFoundError if the branch is not found; + GitError for other errors + """ + validate_branch(branch) + log.info("Fetching %s from origin", branch) + args = ['git', 'fetch'] + if shallow: + args.extend(['--depth', '1']) + args.extend(['-p', 'origin', fetch_refspec(branch)]) + proc = subprocess.Popen( + args, + cwd=repo_path, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + if proc.wait() != 0: + not_found_str = "fatal: couldn't find remote ref %s" % branch + out = proc.stdout.read().decode() + log.error(out) + if not_found_str in out.lower(): + raise BranchNotFoundError(branch) + else: + raise GitError("git fetch failed!") + + +def reset_repo(repo_url, dest_path, branch, commit=None): + """ + + :param repo_url: The full URL to the repo (not including the branch) + :param dest_path: The full path to the destination directory + :param branch: The branch. + :param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch. + :raises: BranchNotFoundError if the branch is not found; + CommitNotFoundError if the commit is not found; + GitError for other errors + """ + validate_branch(branch) + if '/' in branch: + reset_branch = lsstrip(remote_ref_from_ref(branch), 'refs/remotes/') + else: + reset_branch = 'origin/%s' % branch + reset_ref = commit or reset_branch + log.info('Resetting repo at %s to %s', dest_path, reset_ref) + # This try/except block will notice if the requested branch doesn't + # exist, whether it was cloned or fetched. + try: + subprocess.check_output( + ('git', 'reset', '--hard', reset_ref), + cwd=dest_path, + ) + except subprocess.CalledProcessError: + if commit: + raise CommitNotFoundError(commit, repo_url) + raise BranchNotFoundError(branch, repo_url) + + +def remove_pyc_files(dest_path): + subprocess.check_call( + ['find', dest_path, '-name', '*.pyc', '-exec', 'rm', '{}', ';'] + ) + + +def validate_branch(branch): + if ' ' in branch: + raise ValueError("Illegal branch name: '%s'" % branch) + + +def fetch_repo(url, branch, commit=None, bootstrap=None, lock=True): + """ + Make sure we have a given project's repo checked out and up-to-date with + the current branch requested + + :param url: The URL to the repo + :param bootstrap: An optional callback function to execute. Gets passed a + dest_dir argument: the path to the repo on-disk. + :param branch: The branch we want + :param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch. 
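+ :param lock: Whether to serialize checkout updates using a lock file
+ next to the destination directory. Defaults to True.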
+ :returns: The destination path + """ + src_base_path = config.src_base_path + if not os.path.exists(src_base_path): + os.mkdir(src_base_path) + ref_dir = ref_to_dirname(commit or branch) + dirname = '%s_%s' % (url_to_dirname(url), ref_dir) + dest_path = os.path.join(src_base_path, dirname) + # only let one worker create/update the checkout at a time + lock_path = dest_path.rstrip('/') + '.lock' + with FileLock(lock_path, noop=not lock): + with safe_while(sleep=10, tries=60) as proceed: + try: + while proceed(): + try: + enforce_repo_state(url, dest_path, branch, commit) + if bootstrap: + sentinel = os.path.join(dest_path, '.bootstrapped') + if commit and os.path.exists(sentinel) or is_fresh(sentinel): + log.info( + "Skipping bootstrap as it was already done in the last %ss", + FRESHNESS_INTERVAL, + ) + break + bootstrap(dest_path) + touch_file(sentinel) + break + except GitError: + log.exception("Git error encountered; retrying") + except BootstrapError: + log.exception("Bootstrap error encountered; retrying") + except MaxWhileTries: + shutil.rmtree(dest_path, ignore_errors=True) + raise + return dest_path + + +def ref_to_dirname(branch): + if '/' in branch: + return local_branch_from_ref(branch) + else: + return branch + + +def url_to_dirname(url): + """ + Given a URL, returns a string that's safe to use as a directory name. + Examples: + + git@git.ceph.com/ceph-qa-suite.git -> git.ceph.com_ceph-qa-suite + git://git.ceph.com/ceph-qa-suite.git -> git.ceph.com_ceph-qa-suite + https://github.com/ceph/ceph -> github.com_ceph_ceph + https://github.com/liewegas/ceph.git -> github.com_liewegas_ceph + file:///my/dir/has/ceph.git -> my_dir_has_ceph + """ + # Strip protocol from left-hand side + string = re.match('(?:.*://|.*@)(.*)', url).groups()[0] + # Strip '.git' from the right-hand side + string = string.rstrip('.git') + # Replace certain characters with underscores + string = re.sub('[:/]', '_', string) + # Remove duplicate underscores + string = re.sub('_+', '_', string) + # Remove leading or trailing underscore + string = string.strip('_') + return string + + +def fetch_qa_suite(branch, commit=None, lock=True): + """ + Make sure ceph-qa-suite is checked out. + + :param branch: The branch to fetch + :param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch. + :returns: The destination path + """ + return fetch_repo(config.get_ceph_qa_suite_git_url(), + branch, commit, lock=lock) + + +def fetch_teuthology(branch, commit=None, lock=True): + """ + Make sure we have the correct teuthology branch checked out and up-to-date + + :param branch: The branch we want + :param commit: The sha1 to checkout. Defaults to None, which uses HEAD of the branch. + :returns: The destination path + """ + url = config.ceph_git_base_url + 'teuthology.git' + return fetch_repo(url, branch, commit, bootstrap_teuthology, lock) + + +def bootstrap_teuthology(dest_path): + log.info("Bootstrapping %s", dest_path) + # This magic makes the bootstrap script not attempt to clobber an + # existing virtualenv. But the branch's bootstrap needs to actually + # check for the NO_CLOBBER variable. 
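+ # If bootstrap fails, the virtualenv it may have created is removed
+ # below, so a retry (see fetch_repo) starts from a clean state.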
+ env = os.environ.copy() + env['NO_CLOBBER'] = '1' + cmd = './bootstrap' + boot_proc = subprocess.Popen(cmd, shell=True, cwd=dest_path, env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True) + out, err = boot_proc.communicate() + returncode = boot_proc.wait() + log.info("Bootstrap exited with status %s", returncode) + if returncode != 0: + for line in out.split("\n"): + log.warning(line.strip()) + venv_path = os.path.join(dest_path, 'virtualenv') + log.info("Removing %s", venv_path) + shutil.rmtree(venv_path, ignore_errors=True) + raise BootstrapError("Bootstrap failed!") diff --git a/teuthology/report.py b/teuthology/report.py new file mode 100644 index 0000000000..d7375f3616 --- /dev/null +++ b/teuthology/report.py @@ -0,0 +1,584 @@ +import os +import yaml +import json +import re +import requests +import logging +import random +import socket +from datetime import datetime + +import teuthology +from teuthology.config import config +from teuthology.contextutil import safe_while +from teuthology.job_status import get_status, set_status + +report_exceptions = (requests.exceptions.RequestException, socket.error) + + +def init_logging(): + """ + Set up logging for the module + + :returns: a logger + """ + log = logging.getLogger(__name__) + return log + + +def main(args): + run = args['--run'] + job = args['--job'] + dead = args['--dead'] + refresh = dead or args['--refresh'] + server = args['--server'] + if server: + config.results_server = server + if args['--verbose']: + teuthology.log.setLevel(logging.DEBUG) + + archive_base = os.path.abspath(os.path.expanduser(args['--archive'])) or \ + config.archive_base + save = not args['--no-save'] + + log = init_logging() + reporter = ResultsReporter(archive_base, save=save, refresh=refresh, + log=log) + if dead and not job: + for run_name in run: + try_mark_run_dead(run[0]) + elif dead and len(run) == 1 and job: + reporter.report_jobs(run[0], job, dead=True) + elif len(run) == 1 and job: + reporter.report_jobs(run[0], job) + elif run and len(run) > 1: + reporter.report_runs(run) + elif run: + reporter.report_run(run[0]) + elif args['--all-runs']: + reporter.report_all_runs() + + +class ResultsSerializer(object): + """ + This class exists to poke around in the archive directory doing things like + assembling lists of test runs, lists of their jobs, and merging sets of job + YAML files together to form JSON objects. + """ + yamls = ('orig.config.yaml', 'config.yaml', 'info.yaml', 'summary.yaml') + + def __init__(self, archive_base, log=None): + self.archive_base = archive_base or config.archive_base + self.log = log or init_logging() + + + def job_info(self, run_name, job_id, pretty=False, simple=False): + """ + Given a run name and job id, merge the job's YAML files together. + + :param run_name: The name of the run. + :param job_id: The job's id. + :param simple(bool): Read less data for speed (only orig.config.yaml/info.yaml) + :returns: A dict. 
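+ When simple is False, an 'updated' timestamp derived from the mtime of
+ teuthology.log is also included if that log exists.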
+ """ + job_archive_dir = os.path.join(self.archive_base, + run_name, + job_id) + job_info = {} + + if simple: + self.yamls = ('orig.config.yaml', 'info.yaml') + + for yaml_name in self.yamls: + yaml_path = os.path.join(job_archive_dir, yaml_name) + if not os.path.exists(yaml_path): + continue + with open(yaml_path) as yaml_file: + partial_info = yaml.safe_load(yaml_file) + if partial_info is not None: + job_info.update(partial_info) + + if 'job_id' not in job_info: + job_info['job_id'] = job_id + + if simple: + return job_info + + log_path = os.path.join(job_archive_dir, 'teuthology.log') + if os.path.exists(log_path): + mtime = int(os.path.getmtime(log_path)) + mtime_dt = datetime.fromtimestamp(mtime) + job_info['updated'] = str(mtime_dt) + + + return job_info + + def json_for_job(self, run_name, job_id, pretty=False): + """ + Given a run name and job id, merge the job's YAML files together to + create a JSON object. + + :param run_name: The name of the run. + :param job_id: The job's id. + :returns: A JSON object. + """ + job_info = self.job_info(run_name, job_id, pretty) + if pretty: + job_json = json.dumps(job_info, sort_keys=True, indent=4) + else: + job_json = json.dumps(job_info) + + return job_json + + def jobs_for_run(self, run_name): + """ + Given a run name, look on the filesystem for directories containing job + information, and return a dict mapping job IDs to job directories. + + :param run_name: The name of the run. + :returns: A dict like: {'1': '/path/to/1', '2': 'path/to/2'} + """ + archive_dir = os.path.join(self.archive_base, run_name) + if not os.path.isdir(archive_dir): + return {} + jobs = {} + for item in os.listdir(archive_dir): + if not re.match('\d+$', item): + continue + job_id = item + job_dir = os.path.join(archive_dir, job_id) + if os.path.isdir(job_dir): + jobs[job_id] = job_dir + return jobs + + def running_jobs_for_run(self, run_name): + """ + Like jobs_for_run(), but only returns jobs with no summary.yaml + + :param run_name: The name of the run. + :returns: A dict like: {'1': '/path/to/1', '2': 'path/to/2'} + """ + jobs = self.jobs_for_run(run_name) + for job_id in list(jobs): + if os.path.exists(os.path.join(jobs[job_id], 'summary.yaml')): + jobs.pop(job_id) + return jobs + + @property + def all_runs(self): + """ + Look in the base archive directory for all test runs. Return a list of + their names. 
+ """ + archive_base = self.archive_base + if not os.path.isdir(archive_base): + return [] + runs = [] + for run_name in os.listdir(archive_base): + if not os.path.isdir(os.path.join(archive_base, run_name)): + continue + runs.append(run_name) + return runs + + +class ResultsReporter(object): + last_run_file = 'last_successful_run' + + def __init__(self, archive_base=None, base_uri=None, save=False, + refresh=False, log=None): + self.log = log or init_logging() + self.archive_base = archive_base or config.archive_base + self.base_uri = base_uri or config.results_server + if self.base_uri: + self.base_uri = self.base_uri.rstrip('/') + + self.serializer = ResultsSerializer(archive_base, log=self.log) + self.save_last_run = save + self.refresh = refresh + self.session = self._make_session() + + if not self.base_uri: + msg = "No results_server set in {yaml}; cannot report results" + self.log.warning(msg.format(yaml=config.yaml_path)) + + def _make_session(self, max_retries=10): + session = requests.Session() + adapter = requests.adapters.HTTPAdapter(max_retries=max_retries) + session.mount('http://', adapter) + return session + + def report_all_runs(self): + """ + Report *all* runs in self.archive_dir to the results server. + """ + all_runs = self.serializer.all_runs + last_run = self.last_run + if self.save_last_run and last_run and last_run in all_runs: + next_index = all_runs.index(last_run) + 1 + runs = all_runs[next_index:] + else: + runs = all_runs + return self.report_runs(runs) + + def report_runs(self, run_names): + """ + Report several runs to the results server. + + :param run_names: The names of the runs. + """ + num_runs = len(run_names) + num_jobs = 0 + self.log.info("Posting %s runs", num_runs) + for run in run_names: + job_count = self.report_run(run) + num_jobs += job_count + if self.save_last_run: + self.last_run = run + del self.last_run + self.log.info("Total: %s jobs in %s runs", num_jobs, len(run_names)) + + def report_run(self, run_name, dead=False): + """ + Report a single run to the results server. + + :param run_name: The name of the run. + :returns: The number of jobs reported. + """ + jobs = self.serializer.jobs_for_run(run_name) + self.log.info("{name} {jobs} jobs dead={dead}".format( + name=run_name, + jobs=len(jobs), + dead=str(dead), + )) + if jobs: + if not self.refresh: + response = self.session.head("{base}/runs/{name}/".format( + base=self.base_uri, name=run_name)) + if response.status_code == 200: + self.log.info(" already present; skipped") + return 0 + self.report_jobs(run_name, jobs.keys(), dead=dead) + elif not jobs: + self.log.debug(" no jobs; skipped") + return len(jobs) + + def report_jobs(self, run_name, job_ids, dead=False): + """ + Report several jobs to the results server. + + :param run_name: The name of the run. + :param job_ids: The jobs' ids + """ + for job_id in job_ids: + self.report_job(run_name, job_id, dead=dead) + + def report_job(self, run_name, job_id, job_info=None, dead=False): + """ + Report a single job to the results server. + + :param run_name: The name of the run. The run must already exist. + :param job_id: The job's id + :param job_info: The job's info dict. Optional - if not present, we + look at the archive. 
+ """ + if job_info is not None and not isinstance(job_info, dict): + raise TypeError("job_info must be a dict") + run_uri = "{base}/runs/{name}/jobs/".format( + base=self.base_uri, name=run_name,) + if job_info is None: + job_info = self.serializer.job_info(run_name, job_id) + if dead and get_status(job_info) is None: + set_status(job_info, 'dead') + job_json = json.dumps(job_info) + headers = {'content-type': 'application/json'} + + inc = random.uniform(0, 1) + with safe_while( + sleep=1, increment=inc, action=f'report job {job_id}') as proceed: + while proceed(): + response = self.session.post(run_uri, data=job_json, headers=headers) + + if response.status_code == 200: + return + + # This call is wrapped in a try/except because of: + # http://tracker.ceph.com/issues/8166 + try: + resp_json = response.json() + except ValueError: + resp_json = dict() + + if resp_json: + msg = resp_json.get('message', '') + else: + msg = response.text + + if msg and msg.endswith('already exists'): + job_uri = os.path.join(run_uri, job_id, '') + response = self.session.put(job_uri, data=job_json, + headers=headers) + if response.status_code == 200: + return + elif msg: + self.log.error( + "POST to {uri} failed with status {status}: {msg}".format( + uri=run_uri, + status=response.status_code, + msg=msg, + )) + response.raise_for_status() + + @property + def last_run(self): + """ + The last run to be successfully reported. + """ + if hasattr(self, '__last_run'): + return self.__last_run + elif os.path.exists(self.last_run_file): + with open(self.last_run_file) as f: + self.__last_run = f.read().strip() + return self.__last_run + + @last_run.setter + def last_run(self, run_name): + self.__last_run = run_name + with open(self.last_run_file, 'w') as f: + f.write(run_name) + + @last_run.deleter + def last_run(self): + self.__last_run = None + if os.path.exists(self.last_run_file): + os.remove(self.last_run_file) + + def get_jobs(self, run_name, job_id=None, fields=None): + """ + Query the results server for jobs in a run + + :param run_name: The name of the run + :param job_id: Optionally get a single job instead of all + :param fields: Optional. A list of fields to include in the result. + Defaults to returning all fields. + """ + uri = "{base}/runs/{name}/jobs/".format(base=self.base_uri, + name=run_name) + if job_id: + uri = os.path.join(uri, job_id) + if fields: + if 'job_id' not in fields: + fields.append('job_id') + uri += "?fields=" + ','.join(fields) + response = self.session.get(uri) + response.raise_for_status() + return response.json() + + def get_run(self, run_name, fields=None): + """ + Query the results server for a run + + :param run_name: The name of the run + :param fields: Optional. A list of fields to include in the result. + Defaults to returning all fields. 
+ """ + uri = "{base}/runs/{name}".format(base=self.base_uri, name=run_name) + if fields: + uri += "?fields=" + ','.join(fields) + response = self.session.get(uri) + response.raise_for_status() + return response.json() + + def _parse_log_line(self, line, prefix): + # parse log lines like + # 2018-07-27T00:30:55.967 INFO:teuthology.results:subset: '35/999' + msg = line.split(' ', 1)[1].split(':', 2)[-1] + if not msg.startswith(prefix): + return None + else: + return msg[len(prefix):].strip(" '") + + def get_rerun_conf(self, run_name): + log_path = os.path.join(self.archive_base, run_name, 'results.log') + # parse the log file generated by teuthology.results.results() + subset = None + no_nested_subset = None + seed = None + with open(log_path) as results_log: + for line in results_log: + if ':' not in line: + # stop if this does not look line a log line + break + line = line.strip() + if subset is None: + subset = self._parse_log_line(line, 'subset:') + if no_nested_subset is None: + no_nested_subset = self._parse_log_line(line, 'no_nested_subset:') + if seed is None: + seed = self._parse_log_line(line, 'seed:') + if subset is not None: + subset = tuple(int(i) for i in subset.split('/')) + if no_nested_subset is not None: + no_nested_subset = bool(no_nested_subset) + if seed is not None: + seed = int(seed) + return subset, no_nested_subset, seed + + def delete_job(self, run_name, job_id): + """ + Delete a job from the results server. + + :param run_name: The name of the run + :param job_id: The job's id + """ + uri = "{base}/runs/{name}/jobs/{job_id}/".format( + base=self.base_uri, name=run_name, job_id=job_id) + response = self.session.delete(uri) + response.raise_for_status() + + def delete_jobs(self, run_name, job_ids): + """ + Delete multiple jobs from the results server. + + :param run_name: The name of the run + :param job_ids: A list of job ids + """ + for job_id in job_ids: + self.delete_job(self, run_name, job_id) + + def delete_run(self, run_name): + """ + Delete a run from the results server. + + :param run_name: The name of the run + """ + uri = "{base}/runs/{name}/".format( + base=self.base_uri, name=run_name) + response = self.session.delete(uri) + response.raise_for_status() + + +def push_job_info(run_name, job_id, job_info, base_uri=None): + """ + Push a job's info (example: ctx.config) to the results server. + + :param run_name: The name of the run. + :param job_id: The job's id + :param job_info: A dict containing the job's information. + :param base_uri: The endpoint of the results server. If you leave it out + ResultsReporter will ask teuthology.config. 
+ """ + reporter = ResultsReporter() + if not reporter.base_uri: + return + reporter.report_job(run_name, job_id, job_info) + + +def try_push_job_info(job_config, extra_info=None): + """ + Wrap push_job_info, gracefully doing nothing if: + Anything inheriting from requests.exceptions.RequestException is raised + A socket.error is raised + config.results_server is not set + config['job_id'] is not present or is None + + :param job_config: The ctx.config object to push + :param extra_info: Optional second dict to push + """ + log = init_logging() + + if not config.results_server: + log.warning('No results_server in config; not reporting results') + return + + if job_config.get('job_id') is None: + log.warning('No job_id found; not reporting results') + return + + run_name = job_config['name'] + job_id = job_config['job_id'] + + if extra_info is not None: + job_info = job_config.copy() + job_info.update(extra_info) + else: + job_info = job_config + + try: + log.debug("Pushing job info to %s", config.results_server) + push_job_info(run_name, job_id, job_info) + return + except report_exceptions: + log.exception("Could not report results to %s", + config.results_server) + + +def try_delete_jobs(run_name, job_ids, delete_empty_run=True): + """ + Using the same error checking and retry mechanism as try_push_job_info(), + delete one or more jobs + + :param run_name: The name of the run. + :param job_ids: Either a single job_id, or a list of job_ids + :param delete_empty_run: If this would empty the run, delete it. + """ + log = init_logging() + + if not isinstance(job_ids, list): + if isinstance(job_ids, int): + job_ids = [str(job_ids)] + elif isinstance(job_ids, bytes): + job_ids = [str(job_ids.decode())] + else: + job_ids = [job_ids] + + reporter = ResultsReporter() + if not reporter.base_uri: + return + + log.debug("Deleting jobs from {server}: {jobs}".format( + server=config.results_server, jobs=str(job_ids))) + + if delete_empty_run: + got_jobs = reporter.get_jobs(run_name, fields=['job_id']) + got_job_ids = [j['job_id'] for j in got_jobs] + if sorted(got_job_ids) == sorted(job_ids): + try: + reporter.delete_run(run_name) + return + except report_exceptions: + log.exception("Run deletion failed") + + def try_delete_job(job_id): + try: + reporter.delete_job(run_name, job_id) + return + except report_exceptions: + log.exception("Job deletion failed") + + for job_id in job_ids: + try_delete_job(job_id) + + +def try_mark_run_dead(run_name): + """ + Using the same error checking and retry mechanism as try_push_job_info(), + mark any unfinished runs as dead. + + :param run_name: The name of the run. 
+ """ + log = init_logging() + reporter = ResultsReporter() + if not reporter.base_uri: + return + + log.debug("Marking run as dead: {name}".format(name=run_name)) + jobs = reporter.get_jobs(run_name, fields=['status']) + for job in jobs: + if job['status'] not in ['pass', 'fail', 'dead']: + job_id = job['job_id'] + try: + log.info("Marking job {job_id} as dead".format(job_id=job_id)) + reporter.report_job(run_name, job['job_id'], dead=True) + except report_exceptions: + log.exception("Could not mark job as dead: {job_id}".format( + job_id=job_id)) diff --git a/teuthology/results.py b/teuthology/results.py new file mode 100644 index 0000000000..aae991eaf1 --- /dev/null +++ b/teuthology/results.py @@ -0,0 +1,272 @@ +import os +import time +import logging +from collections import OrderedDict +from textwrap import dedent +from textwrap import fill + +import teuthology +from teuthology.config import config +from teuthology import misc +from teuthology.report import ResultsReporter +from teuthology.scrape import Scraper + +log = logging.getLogger(__name__) + +UNFINISHED_STATUSES = ('queued', 'running', 'waiting') + + +def main(args): + + log = logging.getLogger(__name__) + if args['--verbose']: + teuthology.log.setLevel(logging.DEBUG) + + if not args['--dry-run']: + log_path = os.path.join(args['--archive-dir'], 'results.log') + teuthology.setup_log_file(log_path) + + try: + if args['--seed']: + note_rerun_params(args['--subset'], args['--no-nested-subset'], args['--seed']) + else: + results(args['--archive-dir'], args['--name'], args['--email'], + int(args['--timeout']), args['--dry-run']) + except Exception: + log.exception('error generating memo/results') + raise + + +def note_rerun_params(subset, no_nested_subset, seed): + if subset: + log.info('subset: %r', subset) + if no_nested_subset: + log.info('no_nested_subset: %r', no_nested_subset) + if seed: + log.info('seed: %r', seed) + + +def results(archive_dir, name, email, timeout, dry_run): + starttime = time.time() + + if timeout: + log.info('Waiting up to %d seconds for tests to finish...', timeout) + + reporter = ResultsReporter() + while timeout > 0: + if time.time() - starttime > timeout: + log.warning('test(s) did not finish before timeout of %d seconds', + timeout) + break + jobs = reporter.get_jobs(name, fields=['job_id', 'status']) + unfinished_jobs = [job for job in jobs if job['status'] in + UNFINISHED_STATUSES] + if not unfinished_jobs: + log.info('Tests finished! 
gathering results...') + break + time.sleep(60) + + (subject, body) = build_email_body(name) + + Scraper(archive_dir).analyze() + if email and dry_run: + print("From: %s" % (config.results_sending_email or 'teuthology')) + print("To: %s" % email) + print("Subject: %s" % subject) + print(body) + elif email: + email_results( + subject=subject, + from_=(config.results_sending_email or 'teuthology'), + to=email, + body=body, + ) + + +def email_results(subject, from_, to, body): + log.info('Sending results to {to}: {body}'.format(to=to, body=body)) + import smtplib + from email.mime.text import MIMEText + msg = MIMEText(body) + msg['Subject'] = subject + msg['From'] = from_ + msg['To'] = to + log.debug('sending email %s', msg.as_string()) + smtp = smtplib.SMTP('localhost') + smtp.sendmail(msg['From'], [msg['To']], msg.as_string()) + smtp.quit() + + +def build_email_body(name, _reporter=None): + stanzas = OrderedDict([ + ('fail', dict()), + ('dead', dict()), + ('running', dict()), + ('waiting', dict()), + ('queued', dict()), + ('pass', dict()), + ]) + reporter = _reporter or ResultsReporter() + fields = ('job_id', 'status', 'description', 'duration', 'failure_reason', + 'sentry_event', 'log_href') + jobs = reporter.get_jobs(name, fields=fields) + jobs.sort(key=lambda job: job['job_id']) + + for job in jobs: + job_stanza = format_job(name, job) + stanzas[job['status']][job['job_id']] = job_stanza + + sections = OrderedDict.fromkeys(stanzas.keys(), '') + subject_fragments = [] + for status in sections.keys(): + stanza = stanzas[status] + if stanza: + subject_fragments.append('%s %s' % (len(stanza), status)) + sections[status] = email_templates['sect_templ'].format( + title=status.title(), + jobs=''.join(stanza.values()), + ) + subject = ', '.join(subject_fragments) + ' ' + + if config.archive_server: + log_root = os.path.join(config.archive_server, name, '') + else: + log_root = None + + body = email_templates['body_templ'].format( + name=name, + info_root=misc.get_results_url(name), + log_root=log_root, + fail_count=len(stanzas['fail']), + dead_count=len(stanzas['dead']), + running_count=len(stanzas['running']), + waiting_count=len(stanzas['waiting']), + queued_count=len(stanzas['queued']), + pass_count=len(stanzas['pass']), + fail_sect=sections['fail'], + dead_sect=sections['dead'], + running_sect=sections['running'], + waiting_sect=sections['waiting'], + queued_sect=sections['queued'], + pass_sect=sections['pass'], + ) + + subject += 'in {suite}'.format(suite=name) + return (subject.strip(), body.strip()) + + +def format_job(run_name, job): + job_id = job['job_id'] + status = job['status'] + description = job['description'] + duration = seconds_to_hms(int(job['duration'] or 0)) + + # Every job gets a link to e.g. 
pulpito's pages + info_url = misc.get_results_url(run_name, job_id) + if info_url: + info_line = email_templates['info_url_templ'].format(info=info_url) + else: + info_line = '' + + if status in UNFINISHED_STATUSES: + format_args = dict( + job_id=job_id, + desc=description, + time=duration, + info_line=info_line, + ) + return email_templates['running_templ'].format(**format_args) + + if status == 'pass': + return email_templates['pass_templ'].format( + job_id=job_id, + desc=description, + time=duration, + info_line=info_line, + ) + else: + log_dir_url = job['log_href'].rstrip('teuthology.yaml') + if log_dir_url: + log_line = email_templates['fail_log_templ'].format( + log=log_dir_url) + else: + log_line = '' + sentry_event = job.get('sentry_event') + if sentry_event: + sentry_line = email_templates['fail_sentry_templ'].format( + sentry_event=sentry_event) + else: + sentry_line = '' + + if job['failure_reason']: + # 'fill' is from the textwrap module and it collapses a given + # string into multiple lines of a maximum width as specified. + # We want 75 characters here so that when we indent by 4 on the + # next line, we have 79-character exception paragraphs. + reason = fill(job['failure_reason'] or '', 75) + reason = \ + '\n'.join((' ') + line for line in reason.splitlines()) + reason_lines = email_templates['fail_reason_templ'].format( + reason=reason).rstrip() + else: + reason_lines = '' + + format_args = dict( + job_id=job_id, + desc=description, + time=duration, + info_line=info_line, + log_line=log_line, + sentry_line=sentry_line, + reason_lines=reason_lines, + ) + return email_templates['fail_templ'].format(**format_args) + + +def seconds_to_hms(seconds): + (minutes, seconds) = divmod(seconds, 60) + (hours, minutes) = divmod(minutes, 60) + return "%02d:%02d:%02d" % (hours, minutes, seconds) + + +email_templates = { + 'body_templ': dedent("""\ + Test Run: {name} + ================================================================= + info: {info_root} + logs: {log_root} + failed: {fail_count} + dead: {dead_count} + running: {running_count} + waiting: {waiting_count} + queued: {queued_count} + passed: {pass_count} + + {fail_sect}{dead_sect}{running_sect}{waiting_sect}{queued_sect}{pass_sect} + """), + 'sect_templ': dedent("""\ + + {title} + ================================================================= + {jobs} + """), + 'fail_templ': dedent("""\ + [{job_id}] {desc} + ----------------------------------------------------------------- + time: {time}{info_line}{log_line}{sentry_line}{reason_lines} + + """), + 'info_url_templ': "\ninfo: {info}", + 'fail_log_templ': "\nlog: {log}", + 'fail_sentry_templ': "\nsentry: {sentry_event}", + 'fail_reason_templ': "\n\n{reason}\n", + 'running_templ': dedent("""\ + [{job_id}] {desc}{info_line} + + """), + 'pass_templ': dedent("""\ + [{job_id}] {desc} + time: {time}{info_line} + + """), +} diff --git a/teuthology/run.py b/teuthology/run.py new file mode 100644 index 0000000000..e065495cff --- /dev/null +++ b/teuthology/run.py @@ -0,0 +1,414 @@ +import os +import yaml +import sys +import logging + +import teuthology +from teuthology import install_except_hook +from teuthology import report +from teuthology.job_status import get_status +from teuthology.misc import get_user, merge_configs +from teuthology.nuke import nuke +from teuthology.run_tasks import run_tasks +from teuthology.repo_utils import fetch_qa_suite +from teuthology.results import email_results +from teuthology.config import FakeNamespace +from teuthology.config import config as 
teuth_config + +log = logging.getLogger(__name__) + + +def set_up_logging(verbose, archive): + if verbose: + teuthology.log.setLevel(logging.DEBUG) + + if archive is not None: + if not os.path.isdir(archive): + os.mkdir(archive) + + teuthology.setup_log_file(os.path.join(archive, 'teuthology.log')) + + install_except_hook() + + +def write_initial_metadata(archive, config, name, description, owner): + if archive is not None: + with open(os.path.join(archive, 'pid'), 'w') as f: + f.write('%d' % os.getpid()) + + with open(os.path.join(archive, 'owner'), 'w') as f: + f.write(owner + '\n') + + with open(os.path.join(archive, 'orig.config.yaml'), 'w') as f: + yaml.safe_dump(config, f, default_flow_style=False) + + info = { + 'name': name, + 'description': description, + 'owner': owner, + 'pid': os.getpid(), + } + if 'job_id' in config: + info['job_id'] = config['job_id'] + + with open(os.path.join(archive, 'info.yaml'), 'w') as f: + yaml.safe_dump(info, f, default_flow_style=False) + + +def fetch_tasks_if_needed(job_config): + """ + Fetch the suite repo (and include it in sys.path) so that we can use its + tasks. + + Returns the suite_path. The existing suite_path will be returned if the + tasks can be imported, if not a new suite_path will try to be determined. + """ + # Any scheduled job will already have the suite checked out and its + # $PYTHONPATH set. We can check for this by looking for 'suite_path' + # in its config. + suite_path = job_config.get('suite_path') + if suite_path: + log.info("suite_path is set to %s; will attempt to use it", suite_path) + if suite_path not in sys.path: + sys.path.insert(1, suite_path) + + try: + import tasks + log.info("Found tasks at %s", os.path.dirname(tasks.__file__)) + # tasks found with the existing suite branch, return it + return suite_path + except ImportError: + log.info("Tasks not found; will attempt to fetch") + + ceph_branch = job_config.get('branch', 'main') + suite_repo = job_config.get('suite_repo') + if suite_repo: + teuth_config.ceph_qa_suite_git_url = suite_repo + suite_branch = job_config.get('suite_branch', ceph_branch) + suite_sha1 = job_config.get('suite_sha1') + suite_path = os.path.normpath(os.path.join( + fetch_qa_suite(suite_branch, commit=suite_sha1), + job_config.get('suite_relpath', 'qa'), + )) + sys.path.insert(1, suite_path) + return suite_path + + +def setup_config(config_paths): + """ + Takes a list of config yaml files and combines them + into a single dictionary. Processes / validates the dictionary and then + returns it. + """ + config = merge_configs(config_paths) + + # Older versions of teuthology stored job_id as an int. Convert it to a str + # if necessary. + job_id = config.get('job_id') + if job_id is not None: + job_id = str(job_id) + config['job_id'] = job_id + + # targets must be >= than roles + if 'targets' in config and 'roles' in config: + targets = len(config['targets']) + roles = len(config['roles']) + assert targets >= roles, \ + '%d targets are needed for all roles but found %d listed.' % ( + roles, targets) + + return config + + +def get_machine_type(machine_type, config): + """ + If no machine_type is given, find the appropriate machine_type + from the given config. 
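+ The config's 'machine-type' key takes precedence, then 'machine_type',
+ then teuthology's default_machine_type.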
+ """ + if machine_type is None: + fallback_default = config.get('machine_type', + teuth_config.default_machine_type) + machine_type = config.get('machine-type', fallback_default) + + return machine_type + + +def get_summary(owner, description): + summary = dict(success=True) + summary['owner'] = owner + + if description is not None: + summary['description'] = description + + return summary + + +def validate_tasks(config): + """ + Ensures that config tasks is a list and doesn't include 'kernel'. + + Returns the original tasks key if found. If not, returns an + empty list. + """ + if 'tasks' not in config: + log.warning('No tasks specified. Continuing anyway...') + # return the default value for tasks + return [] + + msg = "Expected list in 'tasks'; instead got: {0}".format(config['tasks']) + assert isinstance(config['tasks'], list), msg + + for task in config['tasks']: + msg = ('kernel installation should be a base-level item, not part ' + + 'of the tasks list') + assert 'kernel' not in task, msg + + return config["tasks"] + + +def get_initial_tasks(lock, config, machine_type): + init_tasks = [] + overrides = config.get('overrides', {}) + having_repos = ('repos' in config.get('install', {}) or + 'repos' in overrides.get('install', {})) + if 'redhat' in config: + pass + elif having_repos: + pass + elif not config.get('verify_ceph_hash', True): + pass + else: + init_tasks += [ + {'internal.check_packages': None}, + {'internal.buildpackages_prep': None}, + ] + if 'roles' in config and lock: + msg = ('You cannot specify targets in a config file when using the ' + + '--lock option') + assert 'targets' not in config, msg + init_tasks.append({'internal.lock_machines': ( + len(config['roles']), machine_type)}) + + init_tasks.append({'internal.save_config': None}) + + if 'roles' in config: + init_tasks.append({'internal.check_lock': None}) + + init_tasks.append({'internal.add_remotes': None}) + + if 'roles' in config: + init_tasks.extend([ + {'console_log': None}, + {'internal.connect': None}, + {'internal.push_inventory': None}, + {'internal.serialize_remote_roles': None}, + {'internal.check_conflict': None}, + ]) + + if ('roles' in config and + not config.get('use_existing_cluster', False)): + init_tasks.extend([ + {'internal.check_ceph_data': None}, + {'internal.vm_setup': None}, + ]) + + # install_latest_rh_kernel is used for redhat config + if 'redhat' not in config and 'kernel' in config: + init_tasks.append({'kernel': config['kernel']}) + + if 'roles' in config: + init_tasks.append({'internal.base': None}) + init_tasks.append({'internal.archive_upload': None}) + if 'roles' in config: + init_tasks.extend([ + {'internal.archive': None}, + {'internal.coredump': None}, + {'internal.sudo': None}, + {'internal.syslog': None}, + ]) + init_tasks.append({'internal.timer': None}) + + if 'roles' in config: + init_tasks.extend([ + {'pcp': None}, + {'selinux': None}, + ]) + + if 'redhat' in config: + init_tasks.extend([ + {'internal.setup_stage_cdn': None}]) + + if config.get('ceph_cm_ansible', True): + init_tasks.append({'ansible.cephlab': None}) + + # clock_sync_task: 'clock' or 'clock.check' + clock_sync_task = config.get('clock_sync_task', 'clock') + init_tasks.append({clock_sync_task: None}) + + if 'redhat' in config: + init_tasks.extend([ + {'internal.git_ignore_ssl': None}, + {'internal.setup_cdn_repo': None}, + {'internal.setup_base_repo': None}, + {'internal.setup_additional_repo': None}, + {'internal.setup_container_registry': None}, + {'install': None}, + ]) + # Install latest kernel task for 
redhat downstream runs + if config.get('redhat').get('install_latest_rh_kernel', False): + init_tasks.extend({'kernel.install_latest_rh_kernel': None}) + + return init_tasks + + +def report_outcome(config, archive, summary, fake_ctx): + """ Reports on the final outcome of the command. """ + status = get_status(summary) + passed = status == 'pass' + + if not passed and bool(config.get('nuke-on-error')): + # only unlock if we locked them in the first place + nuke(fake_ctx, fake_ctx.lock) + + if archive is not None: + with open(os.path.join(archive, 'summary.yaml'), 'w') as f: + yaml.safe_dump(summary, f, default_flow_style=False) + + summary_dump = yaml.safe_dump(summary) + log.info('Summary data:\n%s' % summary_dump) + + if ('email-on-error' in config + and not passed): + config_dump = yaml.safe_dump(config) + subject = "Teuthology error -- %s" % summary['failure_reason'] + email_results(subject, "Teuthology", config['email-on-error'], + "\n".join([summary_dump, config_dump])) + + + report.try_push_job_info(config, summary) + + if passed: + log.info(status) + else: + log.info(str(status).upper()) + sys.exit(1) + + +def get_teuthology_command(args): + """ + Rebuilds the teuthology command used to run this job + and returns it as a string. + """ + cmd = ["teuthology"] + for key, value in args.items(): + if value: + # an option, not an argument + if not key.startswith("<"): + cmd.append(key) + else: + # this is the argument + for arg in value: + cmd.append(str(arg)) + continue + # so we don't print something like --verbose True + if isinstance(value, str): + cmd.append(value) + return " ".join(cmd) + + +def main(args): + verbose = args["--verbose"] + archive = args["--archive"] + owner = args["--owner"] + config = args[""] + name = args["--name"] + description = args["--description"] + machine_type = args["--machine-type"] + block = args["--block"] + lock = args["--lock"] + suite_path = args["--suite-path"] + os_type = args["--os-type"] + os_version = args["--os-version"] + interactive_on_error = args["--interactive-on-error"] + + set_up_logging(verbose, archive) + + # print the command being ran + log.debug("Teuthology command: {0}".format(get_teuthology_command(args))) + + if owner is None: + args["--owner"] = owner = get_user() + + config = setup_config(config) + + if archive is not None and 'archive_path' not in config: + config['archive_path'] = archive + + write_initial_metadata(archive, config, name, description, owner) + report.try_push_job_info(config, dict(status='running')) + + machine_type = get_machine_type(machine_type, config) + args["--machine-type"] = machine_type + + if block: + assert lock, \ + 'the --block option is only supported with the --lock option' + + log.info( + '\n '.join(['Config:', ] + yaml.safe_dump( + config, default_flow_style=False).splitlines())) + + args["summary"] = get_summary(owner, description) + + ceph_repo = config.get('repo') + if ceph_repo: + teuth_config.ceph_git_url = ceph_repo + suite_repo = config.get('suite_repo') + if suite_repo: + teuth_config.ceph_qa_suite_git_url = suite_repo + + # overwrite the config values of os_{type,version} if corresponding + # command-line arguments are provided + if os_type: + config["os_type"] = os_type + if os_version: + config["os_version"] = os_version + + config["tasks"] = validate_tasks(config) + + init_tasks = get_initial_tasks(lock, config, machine_type) + + # prepend init_tasks to the front of the task list + config['tasks'][:0] = init_tasks + + if suite_path is not None: + config['suite_path'] = suite_path 
+ + # fetches the tasks and returns a new suite_path if needed + config["suite_path"] = fetch_tasks_if_needed(config) + + # If the job has a 'use_shaman' key, use that value to override the global + # config's value. + if config.get('use_shaman') is not None: + teuth_config.use_shaman = config['use_shaman'] + + #could be refactored for setting and unsetting in hackish way + if interactive_on_error: + config['interactive-on-error'] = True + # create a FakeNamespace instance that mimics the old argparse way of doing + # things we do this so we can pass it to run_tasks without porting those + # tasks to the new way of doing things right now + args[""] = config + fake_ctx = FakeNamespace(args) + + # store on global config if interactive-on-error, for contextutil.nested() + # FIXME this should become more generic, and the keys should use + # '_' uniformly + if fake_ctx.config.get('interactive-on-error'): + teuthology.config.config.ctx = fake_ctx + + try: + run_tasks(tasks=config['tasks'], ctx=fake_ctx) + finally: + # print to stdout the results and possibly send an email on any errors + report_outcome(config, archive, fake_ctx.summary, fake_ctx) diff --git a/teuthology/run_tasks.py b/teuthology/run_tasks.py new file mode 100644 index 0000000000..598947c807 --- /dev/null +++ b/teuthology/run_tasks.py @@ -0,0 +1,360 @@ +import importlib +import jinja2 +import logging +import os +import sys +import time +import types +import yaml + +from copy import deepcopy +from humanfriendly import format_timespan +import sentry_sdk + +from teuthology.config import config as teuth_config +from teuthology.exceptions import ConnectionLostError +from teuthology.job_status import set_status, get_status +from teuthology.misc import get_http_log_path, get_results_url +from teuthology.timer import Timer + +log = logging.getLogger(__name__) + + +def get_task(name): + # todo: support of submodules + if '.' in name: + module_name, task_name = name.split('.') + else: + module_name, task_name = (name, 'task') + + # First look for the tasks's module inside teuthology + module = _import('teuthology.task', module_name, task_name) + # If it is not found, try qa/ directory (if it is in sys.path) + if not module: + module = _import('tasks', module_name, task_name, fail_on_import_error=True) + try: + # Attempt to locate the task object inside the module + task = getattr(module, task_name) + # If we get another module, we need to go deeper + if isinstance(task, types.ModuleType): + task = getattr(task, task_name) + except AttributeError: + log.error("No subtask of '{}' named '{}' was found".format( + module_name, + task_name, + )) + raise + return task + + +def _import(from_package, module_name, task_name, fail_on_import_error=False): + full_module_name = '.'.join([from_package, module_name]) + try: + module = __import__( + full_module_name, + globals(), + locals(), + [task_name], + 0, + ) + except ImportError: + if fail_on_import_error: + raise + else: + if ( + importlib.util.find_spec(from_package) is not None and + importlib.util.find_spec(full_module_name) is not None + ): + # If we get here, it means we could _find_ both the module and + # the package that contains it, but still got an ImportError. + # Typically that means the module failed to import because it + # could not find one of its dependencies; if we don't raise + # here it will look like we just could't find the module, + # making the dependency issue difficult to discover. 
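[Editor's note: a quick illustration of the lookup done by get_task() above. A bare name loads the module's `task` callable, while a dotted name addresses a sub-task inside the module; this sketch only shows the name splitting, the actual import happens in _import(), and the dotted example name is hypothetical.]

    # "install"      -> module "install", attribute "task"
    # "ceph.restart" -> module "ceph",    attribute "restart"
    def split_task_name(name):
        if '.' in name:
            return tuple(name.split('.'))
        return name, 'task'

    assert split_task_name('install') == ('install', 'task')
    assert split_task_name('ceph.restart') == ('ceph', 'restart')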
+ raise + return None + return module + + +def run_one_task(taskname, **kwargs): + taskname = taskname.replace('-', '_') + task = get_task(taskname) + return task(**kwargs) + + +def run_tasks(tasks, ctx): + archive_path = ctx.config.get('archive_path') + if archive_path: + timer = Timer( + path=os.path.join(archive_path, 'timing.yaml'), + sync=True, + ) + else: + timer = Timer() + stack = [] + try: + for taskdict in tasks: + try: + ((taskname, config),) = taskdict.items() + except (ValueError, AttributeError): + raise RuntimeError('Invalid task definition: %s' % taskdict) + log.info('Running task %s...', taskname) + timer.mark('%s enter' % taskname) + manager = run_one_task(taskname, ctx=ctx, config=config) + if hasattr(manager, '__enter__'): + stack.append((taskname, manager)) + manager.__enter__() + except BaseException as e: + if isinstance(e, ConnectionLostError): + # Prevent connection issues being flagged as failures + set_status(ctx.summary, 'dead') + else: + # the status may have been set to dead, leave it as-is if so + if not ctx.summary.get('status', '') == 'dead': + set_status(ctx.summary, 'fail') + if 'failure_reason' not in ctx.summary: + ctx.summary['failure_reason'] = str(e) + log.exception('Saw exception from tasks.') + + if teuth_config.sentry_dsn: + sentry_sdk.init(teuth_config.sentry_dsn) + config = deepcopy(ctx.config) + + tags = { + 'task': taskname, + 'owner': ctx.owner, + } + optional_tags = ('teuthology_branch', 'branch', 'suite', + 'machine_type', 'os_type', 'os_version') + for tag in optional_tags: + if tag in config: + tags[tag] = config[tag] + + # Remove ssh keys from reported config + if 'targets' in config: + targets = config['targets'] + for host in targets.keys(): + targets[host] = '' + + job_id = ctx.config.get('job_id') + archive_path = ctx.config.get('archive_path') + extras = dict(config=config, + ) + if job_id: + extras['logs'] = get_http_log_path(archive_path, job_id) + + fingerprint = e.fingerprint() if hasattr(e, 'fingerprint') else None + exc_id = sentry_sdk.capture_exception( + error=e, + tags=tags, + extras=extras, + fingerprint=fingerprint, + ) + event_url = "{server}/?query={id}".format( + server=teuth_config.sentry_server.strip('/'), id=exc_id) + log.exception(" Sentry event: %s" % event_url) + ctx.summary['sentry_event'] = event_url + + if ctx.config.get('interactive-on-error'): + ctx.config['interactive-on-error'] = False + from teuthology.task import interactive + log.warning('Saw failure during task execution, going into interactive mode...') + interactive.task(ctx=ctx, config=None) + # Throughout teuthology, (x,) = y has been used to assign values + # from yaml files where only one entry of type y is correct. This + # causes failures with 'too many values to unpack.' We want to + # fail as before, but with easier to understand error indicators. + if isinstance(e, ValueError): + if str(e) == 'too many values to unpack': + emsg = 'Possible configuration error in yaml file' + log.error(emsg) + ctx.summary['failure_info'] = emsg + finally: + try: + exc_info = sys.exc_info() + sleep_before_teardown = ctx.config.get('sleep_before_teardown') + if sleep_before_teardown: + log.info( + 'Sleeping for {} seconds before unwinding because' + ' --sleep-before-teardown was given...' 
+ .format(sleep_before_teardown)) + notify_sleep_before_teardown(ctx, stack, sleep_before_teardown) + time.sleep(sleep_before_teardown) + while stack: + taskname, manager = stack.pop() + log.debug('Unwinding manager %s', taskname) + timer.mark('%s exit' % taskname) + try: + suppress = manager.__exit__(*exc_info) + except Exception as e: + if isinstance(e, ConnectionLostError): + # Prevent connection issues being flagged as failures + set_status(ctx.summary, 'dead') + else: + set_status(ctx.summary, 'fail') + if 'failure_reason' not in ctx.summary: + ctx.summary['failure_reason'] = str(e) + log.exception('Manager failed: %s', taskname) + + if exc_info == (None, None, None): + # if first failure is in an __exit__, we don't + # have exc_info set yet + exc_info = sys.exc_info() + + if ctx.config.get('interactive-on-error'): + from teuthology.task import interactive + log.warning( + 'Saw failure during task cleanup, going into interactive mode...') + interactive.task(ctx=ctx, config=None) + else: + if suppress: + exc_info = (None, None, None) + + if exc_info != (None, None, None): + log.debug('Exception was not quenched, exiting: %s: %s', + exc_info[0].__name__, exc_info[1]) + raise SystemExit(1) + finally: + # be careful about cyclic references + del exc_info + timer.mark("tasks complete") + + +def build_rocketchat_message(ctx, stack, sleep_time_sec, template_path=None): + message_template_path = template_path or os.path.dirname(__file__) + \ + '/templates/rocketchat-sleep-before-teardown.jinja2' + + with open(message_template_path) as f: + template_text = f.read() + + template = jinja2.Template(template_text) + archive_path = ctx.config.get('archive_path') + job_id = ctx.config.get('job_id') + status = get_status(ctx.summary) + stack_path = ' -> '.join(task for task, _ in stack) + suite_name=ctx.config.get('suite') + sleep_date=time.time() + sleep_date_str=time.strftime('%Y-%m-%d %H:%M:%S', + time.gmtime(sleep_date)) + + message = template.render( + sleep_time=format_timespan(sleep_time_sec), + sleep_time_sec=sleep_time_sec, + sleep_date=sleep_date_str, + owner=ctx.owner, + run_name=ctx.name, + job_id=ctx.config.get('job_id'), + job_desc=ctx.config.get('description'), + job_info=get_results_url(ctx.name, job_id), + job_logs=get_http_log_path(archive_path, job_id), + suite_name=suite_name, + status=status, + task_stack=stack_path, + ) + return message + + +def build_email_body(ctx, stack, sleep_time_sec): + email_template_path = os.path.dirname(__file__) + \ + '/templates/email-sleep-before-teardown.jinja2' + + with open(email_template_path) as f: + template_text = f.read() + + email_template = jinja2.Template(template_text) + archive_path = ctx.config.get('archive_path') + job_id = ctx.config.get('job_id') + status = get_status(ctx.summary) + stack_path = '/'.join(task for task, _ in stack) + suite_name=ctx.config.get('suite') + sleep_date=time.time() + sleep_date_str=time.strftime('%Y-%m-%d %H:%M:%S', + time.gmtime(sleep_date)) + + body = email_template.render( + sleep_time=format_timespan(sleep_time_sec), + sleep_time_sec=sleep_time_sec, + sleep_date=sleep_date_str, + owner=ctx.owner, + run_name=ctx.name, + job_id=ctx.config.get('job_id'), + job_info=get_results_url(ctx.name), + job_logs=get_http_log_path(archive_path, job_id), + suite_name=suite_name, + status=status, + task_stack=stack_path, + ) + subject = ( + 'teuthology job {run}/{job} has fallen asleep at {date}' + .format(run=ctx.name, job=job_id, date=sleep_date_str) + ) + return (subject.strip(), body.strip()) + + +def 
rocketchat_send_message(ctx, message, channels): + """ + Send the message to the given RocketChat channels + + Before sending the message we read the config file + from `~/.config/rocketchat.api/settings.yaml` which + must include next records: + + username: 'userloginname' + password: 'userbigsecret' + domain: 'https://chat.suse.de' + + :param message: plain text message content in the Rocket.Chat + messaging format + :param channels: a list of channels where to send the message, + the user private channel should be prefixed + with '@' symbol + """ + try: + from rocketchat.api import RocketChatAPI + except Exception as e: + log.warning(f'rocketchat: Failed to import rocketchat.api: {e}') + return + + settings_path = \ + os.environ.get('HOME') + '/.config/rocketchat.api/settings.yaml' + + try: + with open(settings_path) as f: + settings = yaml.safe_load(f) + except Exception as e: + log.warning(f'rocketchat: Failed to load settings from {settings_path}: {e}') + + r = RocketChatAPI(settings=settings) + for channel in channels: + try: + r.send_message(message, channel) + except Exception as e: + log.warning(f'rocketchat: Failed to send message to "{channel}" channel: {e}') + + +def notify_sleep_before_teardown(ctx, stack, sleep_time): + rocketchat = ctx.config.get('rocketchat', None) + + if rocketchat: + channels = [_ for _ in [_.strip() for _ in rocketchat.split(',')] if _] + log.info("Sending a message to Rocket.Chat channels: %s", channels) + message = build_rocketchat_message(ctx, stack, sleep_time) + rocketchat_send_message(ctx, message, channels) + + email = ctx.config.get('email', None) + if not email: + # we have no email configured, return silently + return + (subject, body) = build_email_body(ctx, stack, sleep_time) + log.info('Sending no to {to}: {body}'.format(to=email, body=body)) + import smtplib + from email.mime.text import MIMEText + msg = MIMEText(body) + msg['Subject'] = subject + msg['From'] = teuth_config.results_sending_email or 'teuthology' + msg['To'] = email + log.debug('sending email %s', msg.as_string()) + smtp = smtplib.SMTP('localhost') + smtp.sendmail(msg['From'], [msg['To']], msg.as_string()) + smtp.quit() + diff --git a/teuthology/safepath.py b/teuthology/safepath.py new file mode 100644 index 0000000000..b8115a25ed --- /dev/null +++ b/teuthology/safepath.py @@ -0,0 +1,42 @@ +import errno +import os + +def munge(path): + """ + Munge a potentially hostile path name to be safe to use. + + This very definitely changes the meaning of the path, + but it only does that for unsafe paths. + """ + # explicitly ignoring windows as a platform + segments = path.split('/') + # filter out empty segments like foo//bar + segments = [s for s in segments if s!=''] + # filter out no-op segments like foo/./bar + segments = [s for s in segments if s!='.'] + # all leading dots become underscores; makes .. safe too + for idx, seg in enumerate(segments): + if seg.startswith('.'): + segments[idx] = '_'+seg[1:] + # empty string, "/", "//", etc + if not segments: + segments = ['_'] + return '/'.join(segments) + + +def makedirs(root, path): + """ + os.makedirs gets confused if the path contains '..', and root might. + + This relies on the fact that `path` has been normalized by munge(). 
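[Editor's note: a few concrete inputs and outputs for munge() above; these follow directly from the filtering rules in the code, and the import path assumes the new teuthology/safepath.py module introduced by this patch.]

    from teuthology.safepath import munge

    assert munge('foo//./bar') == 'foo/bar'                 # empty and '.' segments dropped
    assert munge('../../etc/passwd') == '_./_./etc/passwd'  # leading dots neutralized
    assert munge('.hidden/log') == '_hidden/log'
    assert munge('//') == '_'                               # degenerate paths collapse to '_'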
+ """ + segments = path.split('/') + for seg in segments: + root = os.path.join(root, seg) + try: + os.mkdir(root) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + raise diff --git a/teuthology/schedule.py b/teuthology/schedule.py new file mode 100644 index 0000000000..d9af64efc4 --- /dev/null +++ b/teuthology/schedule.py @@ -0,0 +1,143 @@ +import os +import yaml + +import teuthology.beanstalk +from teuthology.misc import get_user, merge_configs +from teuthology import report + + +def main(args): + if not args['--first-in-suite']: + first_job_args = ['subset', 'no-nested-subset', 'seed'] + for arg in first_job_args: + opt = '--{arg}'.format(arg=arg) + msg_fmt = '{opt} is only applicable to the first job in a suite' + if args.get(opt): + raise ValueError(msg_fmt.format(opt=opt)) + + if not args['--last-in-suite']: + last_job_args = ['email', 'timeout'] + for arg in last_job_args: + opt = '--{arg}'.format(arg=arg) + msg_fmt = '{opt} is only applicable to the last job in a suite' + if args[opt]: + raise ValueError(msg_fmt.format(opt=opt)) + + if args['--first-in-suite'] or args['--last-in-suite']: + report_status = False + else: + report_status = True + + name = args['--name'] + if not name or name.isdigit(): + raise ValueError("Please use a more descriptive value for --name") + job_config = build_config(args) + backend = args['--queue-backend'] + if args['--dry-run']: + print('---\n' + yaml.safe_dump(job_config)) + elif backend == 'beanstalk': + schedule_job(job_config, args['--num'], report_status) + elif backend.startswith('@'): + dump_job_to_file(backend.lstrip('@'), job_config, args['--num']) + else: + raise ValueError("Provided schedule backend '%s' is not supported. " + "Try 'beanstalk' or '@path-to-a-file" % backend) + + +def build_config(args): + """ + Given a dict of arguments, build a job config + """ + config_paths = args.get('', list()) + conf_dict = merge_configs(config_paths) + # strip out targets; the worker will allocate new ones when we run + # the job with --lock. + if 'targets' in conf_dict: + del conf_dict['targets'] + args['config'] = conf_dict + + owner = args['--owner'] + if owner is None: + owner = 'scheduled_{user}'.format(user=get_user()) + + job_config = dict( + name=args['--name'], + first_in_suite=args['--first-in-suite'], + last_in_suite=args['--last-in-suite'], + email=args['--email'], + description=args['--description'], + owner=owner, + verbose=args['--verbose'], + machine_type=args['--worker'], + tube=args['--worker'], + priority=int(args['--priority']), + ) + # Update the dict we just created, and not the other way around, to let + # settings in the yaml override what's passed on the command line. This is + # primarily to accommodate jobs with multiple machine types. + job_config.update(conf_dict) + for arg,conf in {'--timeout':'results_timeout', + '--seed': 'seed', + '--subset': 'subset', + '--no-nested-subset': 'no_nested_subset'}.items(): + val = args.get(arg, None) + if val is not None: + job_config[conf] = val + + return job_config + + +def schedule_job(job_config, num=1, report_status=True): + """ + Schedule a job. 
+ + :param job_config: The complete job dict + :param num: The number of times to schedule the job + """ + num = int(num) + job = yaml.safe_dump(job_config) + tube = job_config.pop('tube') + beanstalk = teuthology.beanstalk.connect() + beanstalk.use(tube) + while num > 0: + jid = beanstalk.put( + job, + ttr=60 * 60 * 24, + priority=job_config['priority'], + ) + print('Job scheduled with name {name} and ID {jid}'.format( + name=job_config['name'], jid=jid)) + job_config['job_id'] = str(jid) + if report_status: + report.try_push_job_info(job_config, dict(status='queued')) + num -= 1 + + +def dump_job_to_file(path, job_config, num=1): + """ + Schedule a job. + + :param job_config: The complete job dict + :param num: The number of times to schedule the job + :param path: The file path where the job config to append + """ + num = int(num) + count_file_path = path + '.count' + + jid = 0 + if os.path.exists(count_file_path): + with open(count_file_path, 'r') as f: + jid=int(f.read() or '0') + + with open(path, 'a') as f: + while num > 0: + jid += 1 + job_config['job_id'] = str(jid) + job = yaml.safe_dump(job_config) + print('Job scheduled with name {name} and ID {jid}'.format( + name=job_config['name'], jid=jid)) + f.write('---\n' + job) + num -= 1 + with open(count_file_path, 'w') as f: + f.write(str(jid)) + diff --git a/teuthology/scrape.py b/teuthology/scrape.py new file mode 100644 index 0000000000..0a737683fe --- /dev/null +++ b/teuthology/scrape.py @@ -0,0 +1,515 @@ +# Origin: https://github.com/jcsp/scrape/blob/master/scrape.py +# Author: John Spray (github.com/jcsp) + +import difflib +from errno import ENOENT +from gzip import GzipFile +import sys +import os +import yaml +from collections import defaultdict +import re +import logging +import subprocess + + +log = logging.getLogger('scrape') +log.addHandler(logging.StreamHandler()) +log.setLevel(logging.INFO) + + +class Reason(object): + def get_description(self): + return self.description + + def get_detail(self): + return None + + +def grep(path, expr): + """ + Call out to native grep rather than feeding massive log files through python line by line + """ + p = subprocess.Popen(["grep", expr, path], stdout=subprocess.PIPE, + universal_newlines=True) + p.wait() + out, err = p.communicate() + if p.returncode == 0: + return out.split("\n") + else: + return [] + + +class GenericReason(Reason): + """ + A reason inferred from a Job: matches Jobs with an apparently-similar failure + """ + + def __init__(self, job, description=None): + self.failure_reason = job.get_failure_reason() + self.description = description + + self.backtrace = job.get_backtrace() + if self.backtrace: + log.debug("Found a backtrace!\n{0}".format(self.backtrace)) + + def get_detail(self): + return self.backtrace + + def get_description(self): + if self.description: + return self.description + else: + if self.backtrace: + return "Crash: {0}".format(self.failure_reason) + else: + return "Failure: {0}".format(self.failure_reason) + + def match(self, job): + # I never match dead jobs + if job.get_failure_reason() is None: + return False + + # If one has a backtrace but the other doesn't, we're a different thing even if the official + # failure_reason is the same + if (self.backtrace is None) != (job.get_backtrace() is None): + return False + + # If we have the same backtrace, we're a match even if the teuthology failure_reason + # doesn't match (a crash is a crash, it can have different symptoms) + if self.backtrace: + ratio = difflib.SequenceMatcher(None, self.backtrace, 
job.get_backtrace()).ratio() + return ratio > 0.5 + else: + if "Test failure:" in self.failure_reason: + return self.failure_reason == job.get_failure_reason() + elif re.search("workunit test (.*)\) on ", self.failure_reason): + workunit_name = re.search("workunit test (.*)\) on ", self.failure_reason).group(1) + other_match = re.search("workunit test (.*)\) on ", job.get_failure_reason()) + return other_match is not None and workunit_name == other_match.group(1) + else: + reason_ratio = difflib.SequenceMatcher(None, self.failure_reason, job.get_failure_reason()).ratio() + return reason_ratio > 0.5 + + +class RegexReason(Reason): + """ + A known reason matching a particular regex to failure reason + """ + + def __init__(self, regexes, description): + self.description = description + if isinstance(regexes, list): + self.regexes = regexes + else: + self.regexes = [regexes] + + def match(self, job): + # I never match dead jobs + if job.get_failure_reason() is None: + return False + + for regex in self.regexes: + if re.match(regex, job.get_failure_reason()): + return True + + return False + + +class AssertionReason(Reason): + def __init__(self, job): + self.assertion = job.get_assertion() + self.backtrace = job.get_backtrace() + + def get_description(self): + return "Assertion: {0}".format(self.assertion) + + def get_detail(self): + return self.backtrace + + @classmethod + def could_be(cls, job): + return job.get_assertion() is not None + + def match(self, job): + return self.assertion == job.get_assertion() + + +class LockdepReason(AssertionReason): + """ + Different to a normal assertion, because matches should not only + have the same assertion but the same backtrace (don't want to glob + all lockdep failures together if they are really being tripped in + different places) + """ + @classmethod + def could_be(cls, job): + if not super(LockdepReason, cls).could_be(job): + return False + + return "common/lockdep" in job.get_assertion() + + def get_description(self): + return "Lockdep: {0}".format(self.assertion) + + def match(self, job): + if not super(LockdepReason, self).match(job): + return False + + if self.backtrace: + if job.get_backtrace(): + ratio = difflib.SequenceMatcher(None, self.backtrace, job.get_backtrace()).ratio() + return ratio > 0.5 + else: + return False + else: + # No backtrace to compare about, allow matches based purely on assertion + return True + + +class DeadReason(Reason): + """ + A reason for picking up jobs with no summary.yaml + """ + def __init__(self, job): + self.description = "Dead" + self.last_tlog_line = job.get_last_tlog_line() + self.backtrace = job.get_backtrace() + + def get_description(self): + return "Dead: {0}".format(self.last_tlog_line) + + def get_detail(self): + return self.backtrace + + @classmethod + def could_be(cls, job): + return job.summary_data is None + + def match(self, job): + if job.summary_data: + return False + + if self.backtrace: + if job.get_backtrace(): + # We both have backtrace: use that to decide if we're the same + ratio = difflib.SequenceMatcher(None, self.backtrace, job.get_backtrace()).ratio() + return ratio > 0.5 + else: + # I have BT but he doesn't, so we're different + return False + + if self.last_tlog_line or job.get_last_tlog_line(): + ratio = difflib.SequenceMatcher(None, self.last_tlog_line, + job.get_last_tlog_line()).ratio() + return ratio > 0.5 + return True + + +class TimeoutReason(Reason): + def __init__(self, job): + self.timeout, self.command = self.get_timeout(job) + + def get_description(self): + return 
"Timeout {0} running {1}".format( + self.timeout, self.command + ) + + @classmethod + def could_be(cls, job): + return cls.get_timeout(job) is not None + + @classmethod + def get_timeout(cls, job): + if job.get_failure_reason() is None: + return None + + match = re.search("status 124:.* timeout ([^ ]+) ([^']+)'", job.get_failure_reason()) + if not match: + return + + timeout, bin_path = match.groups() + + # Given a path like /home/ubuntu/cephtest/workunit.client.0/cephtool/test.sh + # ... strip it down to cephtool/test.sh + parts = bin_path.split(os.path.sep) + parts.reverse() + rparts = [] + for p in parts: + if 'workunit.' in p or 'cephtest' in p: + break + else: + rparts.append(p) + rparts.reverse() + command = os.path.sep.join(rparts) + + return timeout, command + + def match(self, job): + return self.get_timeout(job) == (self.timeout, self.command) + +MAX_TEUTHOLOGY_LOG = 1024 * 1024 * 100 +MAX_SVC_LOG = 100 * 1024 * 1024 +MAX_BT_LINES = 100 + + +class Job(object): + def __init__(self, path, job_id): + self.path = path + self.job_id = job_id + + try: + self.config = yaml.safe_load(open(os.path.join(self.path, "config.yaml"), 'r')) + self.description = self.config['description'] + assert self.description + except IOError: + self.config = None + self.description = None + + summary_path = os.path.join(self.path, "summary.yaml") + try: + self.summary_data = yaml.safe_load(open(summary_path, 'r')) + except IOError: + self.summary_data = None + + self.backtrace = None + self.assertion = None + self.populated = False + + def get_success(self): + if self.summary_data: + return self.summary_data['success'] + else: + return False + + def get_failure_reason(self): + if self.summary_data: + return self.summary_data['failure_reason'] + else: + return None + + def get_last_tlog_line(self): + t_path = os.path.join(self.path, "teuthology.log") + if not os.path.exists(t_path): + return None + else: + out, err = subprocess.Popen(["tail", "-n", "1", t_path], stdout=subprocess.PIPE).communicate() + return out.strip() + + def _search_backtrace(self, file_obj): + bt_lines = [] + assertion = None + for line in file_obj: + # Log prefix from teuthology.log + if ".stderr:" in line: + line = line.split(".stderr:")[1] + + if "FAILED assert" in line: + assertion = line.strip() + + if line.startswith(" ceph version"): + # The start of a backtrace! 
+ bt_lines = [line] + elif line.startswith(" NOTE: a copy of the executable"): + # The backtrace terminated, if we have a buffer return it + if len(bt_lines): + return ("".join(bt_lines)).strip(), assertion + else: + log.warning("Saw end of BT but not start") + elif bt_lines: + # We're in a backtrace, push the line onto the list + if len(bt_lines) > MAX_BT_LINES: + # Something wrong with our parsing, drop it + log.warning("Ignoring malparsed backtrace: {0}".format( + ", ".join(bt_lines[0:3]) + )) + bt_lines = [] + bt_lines.append(line) + + return None, assertion + + def get_assertion(self): + if not self.populated: + self._populate_backtrace() + return self.assertion + + def get_backtrace(self): + if not self.populated: + self._populate_backtrace() + return self.backtrace + + def _populate_backtrace(self): + tlog_path = os.path.join(self.path, "teuthology.log") + try: + s = os.stat(tlog_path) + except OSError: + log.warning("Missing teuthology log {0}".format(tlog_path)) + return None + size = s.st_size + if size > MAX_TEUTHOLOGY_LOG: + log.debug("Ignoring teuthology log for job {0}, it is {1} bytes".format(self.job_id, size)) + return None + + self.backtrace, self.assertion = self._search_backtrace(open(tlog_path)) + if self.backtrace: + return + + for line in grep(tlog_path, "command crashed with signal"): + log.debug("Found a crash indication: {0}".format(line)) + # tasks.ceph.osd.1.plana82.stderr + match = re.search("tasks.ceph.([^\.]+).([^\.]+).([^\.]+).stderr", line) + if not match: + log.warning("Not-understood crash indication {0}".format(line)) + continue + svc, svc_id, hostname = match.groups() + gzipped_log_path = os.path.join( + self.path, "remote", hostname, "log", "ceph-{0}.{1}.log.gz".format(svc, svc_id)) + + try: + s = os.stat(gzipped_log_path) + except OSError as e: + if e.errno == ENOENT: + log.warning("Missing log {0}".format(gzipped_log_path)) + continue + else: + raise + + size = s.st_size + if size > MAX_SVC_LOG: + log.warning("Not checking for backtrace from {0}:{1}.{2} log, too large ({3})".format( + hostname, svc, svc_id, size + )) + continue + + bt, ass = self._search_backtrace(GzipFile(gzipped_log_path)) + if ass and not self.assertion: + self.assertion = ass + if bt: + self.backtrace = bt + return + + return None + + +class ValgrindReason(Reason): + def __init__(self, job): + assert self.could_be(job) + self.service_types = self._get_service_types(job) + + def _get_service_types(self, job): + """ + Get dict mapping service type 'osd' etc to sorted list of violation types 'Leak_PossiblyLost' etc + """ + + result = defaultdict(list) + # Lines like: + # 2014-08-22T20:07:18.668 ERROR:tasks.ceph:saw valgrind issue Leak_DefinitelyLost in /var/log/ceph/valgrind/osd.3.log.gz + for line in grep(os.path.join(job.path, "teuthology.log"), " in "): + match = re.search("(.+) in .+/(.+)", line) + if not match: + log.warning("Misunderstood line: {0}".format(line)) + continue + err_typ, log_basename = match.groups() + svc_typ = log_basename.split(".")[0] + if err_typ not in result[svc_typ]: + result[svc_typ].append(err_typ) + result[svc_typ] = sorted(result[svc_typ]) + + return dict(result) + + def get_description(self): + desc_bits = [] + for service, types in list(self.service_types.items()): + desc_bits.append("{0} ({1})".format(service, ", ".join(types))) + return "Valgrind: " + ", ".join(desc_bits) + + @classmethod + def could_be(cls, job): + return job.get_failure_reason() is not None and "saw valgrind issues" in job.get_failure_reason() + + def match(self, job): + return 
self._get_service_types(job) == self.service_types + + +known_reasons = [ + # If the failure reason indicates no packages found... + RegexReason(["Failed to fetch package version from http://", + "Command failed on .* with status 100: 'sudo apt-get update"] + , "Missing packages"), +] + + +def give_me_a_reason(job): + """ + If no existing reasons match the job, generate the most specific reason we can + """ + + # Note: because we match known reasons, including GenericReasons, before any of + # the Timeout/Valgrind whatever, even if a run is a timeout or a valgrind failure, + # it will get matched up with a backtrace or assertion if one is there, hiding + # the valgrind/timeout aspect. + + for r in known_reasons: + if r.match(job): + return r + + # NB ordering matters, LockdepReason must come before AssertionReason + for klass in [DeadReason, LockdepReason, AssertionReason, TimeoutReason, ValgrindReason]: + if klass.could_be(job): + return klass(job) + + return GenericReason(job) + + +class Scraper(object): + def __init__(self, target_dir): + self.target_dir = target_dir + log.addHandler(logging.FileHandler(os.path.join(target_dir, + "scrape.log"))) + + def analyze(self): + entries = os.listdir(self.target_dir) + jobs = [] + for entry in entries: + job_dir = os.path.join(self.target_dir, entry) + if os.path.isdir(job_dir): + jobs.append(Job(job_dir, entry)) + + log.info("Found {0} jobs".format(len(jobs))) + + passes = [] + reasons = defaultdict(list) + + for job in jobs: + if job.get_success(): + passes.append(job) + continue + + matched = False + for reason, reason_jobs in reasons.items(): + if reason.match(job): + reason_jobs.append(job) + matched = True + break + + if not matched: + reasons[give_me_a_reason(job)].append(job) + + log.info("Found {0} distinct failure reasons".format(len(reasons))) + for reason, jobs in list(reasons.items()): + job_spec = "{0} jobs: {1}".format(len(jobs), [j.job_id for j in jobs]) if len(jobs) < 30 else "{0} jobs".format(len(jobs)) + log.info(reason.get_description()) + detail = reason.get_detail() + if detail: + log.info(detail) + log.info(job_spec) + suites = [set(j.description.split()) for j in jobs if j.description != None] + if len(suites) > 1: + log.info("suites intersection: {0}".format(sorted(set.intersection(*suites)))) + log.info("suites union: {0}".format(sorted(set.union(*suites)))) + elif len(suites) == 1: + log.info("suites: {0}".format(sorted(suites[0]))) + log.info("") + +if __name__ == '__main__': + Scraper(sys.argv[1]).analyze() diff --git a/teuthology/suite/__init__.py b/teuthology/suite/__init__.py new file mode 100644 index 0000000000..be51e4dc3c --- /dev/null +++ b/teuthology/suite/__init__.py @@ -0,0 +1,236 @@ +# this file is responsible for submitting tests into the queue +# by generating combinations of facets found in +# https://github.com/ceph/ceph-qa-suite.git + +import logging +import os +import random +import time +from distutils.util import strtobool + +import teuthology +from teuthology.config import config, YamlConfig +from teuthology.report import ResultsReporter +from teuthology.results import UNFINISHED_STATUSES + +from teuthology.suite.run import Run +from teuthology.suite.util import schedule_fail + +log = logging.getLogger(__name__) + + +def override_arg_defaults(name, default, env=os.environ): + env_arg = { + '--ceph-repo' : 'TEUTH_CEPH_REPO', + '--suite-repo' : 'TEUTH_SUITE_REPO', + '--ceph-branch' : 'TEUTH_CEPH_BRANCH', + '--suite-branch' : 'TEUTH_SUITE_BRANCH', + } + if name in env_arg and env_arg[name] in 
env.keys(): + variable = env_arg[name] + value = env[variable] + log.debug("Default value for '{arg}' is overridden " + "from environment with: {val}" + .format(arg=name, val=value)) + return value + else: + return default + + +def process_args(args): + conf = YamlConfig() + rename_args = { + 'ceph': 'ceph_branch', + 'sha1': 'ceph_sha1', + 'kernel': 'kernel_branch', + '': 'base_yaml_paths', + 'filter': 'filter_in', + } + for (key, value) in args.items(): + # Translate --foo-bar to foo_bar + key = key.lstrip('--').replace('-', '_') + # Rename the key if necessary + key = rename_args.get(key) or key + if key == 'suite_branch': + value = value or override_arg_defaults('--suite-branch', None) + if key == 'suite' and value is not None: + value = normalize_suite_name(value) + if key == 'suite_relpath' and value is None: + value = '' + elif key in ('limit', 'priority', 'num', 'newest', 'seed', 'job_threshold'): + value = int(value) + elif key == 'subset' and value is not None: + # take input string '2/3' and turn into (2, 3) + value = tuple(map(int, value.split('/'))) + elif key in ('filter_all', 'filter_in', 'filter_out', 'rerun_statuses'): + if not value: + value = [] + else: + value = [x.strip() for x in value.split(',')] + elif key == 'ceph_repo': + value = expand_short_repo_name( + value, + config.get_ceph_git_url()) + elif key == 'suite_repo': + value = expand_short_repo_name( + value, + config.get_ceph_qa_suite_git_url()) + elif key in ('validate_sha1', 'filter_fragments'): + value = strtobool(value) + conf[key] = value + return conf + + +def normalize_suite_name(name): + return name.replace('/', ':') + +def expand_short_repo_name(name, orig): + # Allow shortname repo name 'foo' or 'foo/bar'. This works with + # github URLs, e.g. + # + # foo -> https://github.com/ceph/foo + # foo/bar -> https://github.com/foo/bar + # + # when the orig URL is also github. The two-level substitution may not + # work with some configs. + name_vec = name.split('/') + if name_vec[-1] == '': + del name_vec[-1] + if len(name_vec) <= 2 and name.count(':') == 0: + orig_vec = orig.split('/') + if orig_vec[-1] == '': + del orig_vec[-1] + return '/'.join(orig_vec[:-len(name_vec)] + name_vec) + '.git' + # otherwise, assume a full URL + return name + +def main(args): + conf = process_args(args) + if conf.verbose: + teuthology.log.setLevel(logging.DEBUG) + + dry_run = conf.dry_run + if not conf.machine_type or conf.machine_type == 'None': + if not config.default_machine_type or config.default_machine_type == 'None': + schedule_fail("Must specify a machine_type", dry_run=dry_run) + else: + conf.machine_type = config.default_machine_type + elif 'multi' in conf.machine_type: + schedule_fail("'multi' is not a valid machine_type. 
" + + "Maybe you want 'gibba,smithi,mira' or similar", dry_run=dry_run) + + if conf.email: + config.results_email = conf.email + if conf.archive_upload: + config.archive_upload = conf.archive_upload + log.info('Will upload archives to ' + conf.archive_upload) + + if conf.rerun: + rerun_filters = get_rerun_filters(conf.rerun, conf.rerun_statuses) + if len(rerun_filters['descriptions']) == 0: + log.warning( + "No jobs matched the status filters: %s", + conf.rerun_statuses, + ) + return + conf.filter_in.extend(rerun_filters['descriptions']) + conf.suite = normalize_suite_name(rerun_filters['suite']) + conf.subset, conf.no_nested_subset, conf.seed = get_rerun_conf(conf) + if conf.seed < 0: + conf.seed = random.randint(0, 9999) + log.info('Using random seed=%s', conf.seed) + + run = Run(conf) + name = run.name + run.prepare_and_schedule() + if not conf.dry_run and conf.wait: + return wait(name, config.max_job_time, + conf.archive_upload_url) + + +def get_rerun_filters(name, statuses): + reporter = ResultsReporter() + run = reporter.get_run(name) + filters = dict() + filters['suite'] = run['suite'] + jobs = [] + for job in run['jobs']: + if job['status'] in statuses: + jobs.append(job) + filters['descriptions'] = [job['description'] for job in jobs if job['description']] + return filters + + +def get_rerun_conf(conf): + reporter = ResultsReporter() + try: + subset, no_nested_subset, seed = reporter.get_rerun_conf(conf.rerun) + except IOError: + return conf.subset, conf.no_nested_subset, conf.seed + if seed is None: + return conf.subset, conf.no_nested_subset, conf.seed + if conf.seed < 0: + log.info('Using stored seed=%s', seed) + elif conf.seed != seed: + log.error('--seed {conf_seed} does not match with ' + + 'stored seed: {stored_seed}', + conf_seed=conf.seed, + stored_seed=seed) + if conf.subset is None: + log.info('Using stored subset=%s', subset) + elif conf.subset != subset: + log.error('--subset {conf_subset} does not match with ' + + 'stored subset: {stored_subset}', + conf_subset=conf.subset, + stored_subset=subset) + if conf.no_nested_subset is True: + log.info('Nested subsets disabled') + return subset, no_nested_subset, seed + + +class WaitException(Exception): + pass + + +def wait(name, max_job_time, upload_url): + stale_job = max_job_time + Run.WAIT_MAX_JOB_TIME + reporter = ResultsReporter() + past_unfinished_jobs = [] + progress = time.time() + log.info(f"waiting for the run {name} to complete") + log.debug("the list of unfinished jobs will be displayed " + "every " + str(Run.WAIT_PAUSE / 60) + " minutes") + exit_code = 0 + while True: + jobs = reporter.get_jobs(name, fields=['job_id', 'status']) + unfinished_jobs = [] + for job in jobs: + if job['status'] in UNFINISHED_STATUSES: + unfinished_jobs.append(job) + elif job['status'] != 'pass': + exit_code = 1 + if len(unfinished_jobs) == 0: + log.info("wait is done") + break + if (len(past_unfinished_jobs) == len(unfinished_jobs) and + time.time() - progress > stale_job): + raise WaitException( + "no progress since " + str(config.max_job_time) + + " + " + str(Run.WAIT_PAUSE) + " seconds") + if len(past_unfinished_jobs) != len(unfinished_jobs): + past_unfinished_jobs = unfinished_jobs + progress = time.time() + time.sleep(Run.WAIT_PAUSE) + job_ids = [job['job_id'] for job in unfinished_jobs] + log.debug('wait for jobs ' + str(job_ids)) + jobs = reporter.get_jobs(name, fields=['job_id', 'status', + 'description', 'log_href']) + # dead, fail, pass : show fail/dead jobs first + jobs = sorted(jobs, key=lambda x: x['status']) + for job 
in jobs: + if upload_url: + url = os.path.join(upload_url, name, job['job_id']) + else: + url = job['log_href'] + log.info(f"{job['status']} {url} {job['description']}") + return exit_code diff --git a/teuthology/suite/build_matrix.py b/teuthology/suite/build_matrix.py new file mode 100644 index 0000000000..e9ee9e60cb --- /dev/null +++ b/teuthology/suite/build_matrix.py @@ -0,0 +1,209 @@ +import logging +import os +import random + +from teuthology.suite import matrix + +log = logging.getLogger(__name__) + + +def build_matrix(path, subset=None, no_nested_subset=False, seed=None): + """ + Return a list of items descibed by path such that if the list of + items is chunked into mincyclicity pieces, each piece is still a + good subset of the suite. + + A good subset of a product ensures that each facet member appears + at least once. A good subset of a sum ensures that the subset of + each sub collection reflected in the subset is a good subset. + + A mincyclicity of 0 does not attempt to enforce the good subset + property. + + The input is just a path. The output is an array of (description, + [file list]) tuples. + + For a normal file we generate a new item for the result list. + + For a directory, we (recursively) generate a new item for each + file/dir. + + For a directory with a magic '+' file, we generate a single item + that concatenates all files/subdirs (A Sum). + + For a directory with a magic '%' file, we generate a result set + for each item in the directory, and then do a product to generate + a result list with all combinations (A Product). If the file + contains an integer, it is used as the divisor for a random + subset. + + For a directory with a magic '$' file, or for a directory whose name + ends in '$', we generate a list of all items that we will randomly + choose from. + + The final description (after recursion) for each item will look + like a relative path. If there was a % product, that path + component will appear as a file with braces listing the selection + of chosen subitems. 
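[Editor's note: a hypothetical illustration of the magic files described above: a suite directory whose empty '%' file requests a product of its subdirectories. The directory and fragment names are invented.]

    #   suites/example/%                      (empty '%' file: convolve the subdirs)
    #   suites/example/clusters/small.yaml
    #   suites/example/clusters/big.yaml
    #   suites/example/tasks/rados.yaml
    from teuthology.suite.build_matrix import build_matrix

    jobs = build_matrix('suites/example')
    # -> two (description, [fragment paths]) tuples, one per clusters/ fragment,
    #    each concatenating a clusters/ file with tasks/rados.yaml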
+ + :param path: The path to search for yaml fragments + :param subset: (index, outof) + :param no_nested_subset: disable nested subsets + :param seed: The seed for repeatable random test + """ + if subset: + log.info( + 'Subset=%s/%s' % + (str(subset[0]), str(subset[1])) + ) + if no_nested_subset: + log.info("no_nested_subset") + random.seed(seed) + mat, first, matlimit = _get_matrix(path, subset, no_nested_subset) + return generate_combinations(path, mat, first, matlimit) + + +def _get_matrix(path, subset=None, no_nested_subset=False): + (which, divisions) = (0,1) if subset is None else subset + if divisions > 1: + mat = _build_matrix(path, mincyclicity=divisions, no_nested_subset=no_nested_subset) + mat = matrix.Subset(mat, divisions, which=which) + else: + mat = _build_matrix(path, no_nested_subset=no_nested_subset) + return mat, 0, mat.size() + + +def _build_matrix(path, mincyclicity=0, no_nested_subset=False, item=''): + if os.path.basename(path)[0] == '.': + return None + if not os.path.exists(path): + raise IOError('%s does not exist (abs %s)' % (path, os.path.abspath(path))) + if os.path.isfile(path): + if path.endswith('.yaml'): + return matrix.Base(item) + return None + if os.path.isdir(path): + if path.endswith('.disable'): + return None + files = sorted(os.listdir(path)) + if len(files) == 0: + return None + if '+' in files: + # concatenate items + files.remove('+') + submats = [] + for fn in sorted(files): + submat = _build_matrix( + os.path.join(path, fn), + mincyclicity, + no_nested_subset, + fn) + if submat is not None: + submats.append(submat) + return matrix.Concat(item, submats) + elif path.endswith('$') or '$' in files: + # pick a random item -- make sure we don't pick any magic files + if '$' in files: + files.remove('$') + if '%' in files: + files.remove('%') + submats = [] + for fn in sorted(files): + submat = _build_matrix( + os.path.join(path, fn), + mincyclicity, + no_nested_subset, + fn) + if submat is not None: + submats.append(submat) + return matrix.PickRandom(item, submats) + elif '%' in files: + # convolve items + files.remove('%') + with open(os.path.join(path, '%')) as f: + divisions = f.read() + if no_nested_subset or len(divisions) == 0: + divisions = 1 + else: + divisions = int(divisions) + assert divisions > 0 + submats = [] + for fn in sorted(files): + submat = _build_matrix( + os.path.join(path, fn), + 0, + no_nested_subset, + fn) + if submat is not None: + submats.append(submat) + mat = matrix.Product(item, submats) + minc = mincyclicity * divisions + if mat and mat.cyclicity() < minc: + mat = matrix.Cycle( + (minc + mat.cyclicity() - 1) // mat.cyclicity(), mat + ) + if divisions > 1: + mat = matrix.Subset(mat, divisions) + return mat + else: + # list items + submats = [] + for fn in sorted(files): + submat = _build_matrix( + os.path.join(path, fn), + mincyclicity, + no_nested_subset, + fn) + if submat is None: + continue + if submat.cyclicity() < mincyclicity: + submat = matrix.Cycle( + ((mincyclicity + submat.cyclicity() - 1) // + submat.cyclicity()), + submat) + submats.append(submat) + return matrix.Sum(item, submats) + assert False, "Invalid path %s seen in _build_matrix" % path + return None + + +def generate_combinations(path, mat, generate_from, generate_to): + """ + Return a list of items describe by path + + The input is just a path. The output is an array of (description, + [file list]) tuples. + + For a normal file we generate a new item for the result list. + + For a directory, we (recursively) generate a new item for each + file/dir. 
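[Editor's note: a plain directory (no magic file) becomes a Sum of its entries, and --subset then selects a contiguous slice of the resulting matrix. A small runnable sketch using the matrix primitives directly; the fragment names are invented.]

    from teuthology.suite import matrix

    mat = matrix.Sum('', [matrix.Base('a.yaml'),
                          matrix.Base('b.yaml'),
                          matrix.Base('c.yaml')])
    assert mat.size() == 3
    sub = matrix.Subset(mat, 3, which=1)   # take the middle third of the suite
    assert sub.size() == 1
    assert sub.index(0) == ('', 'b.yaml')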
+ + For a directory with a magic '+' file, we generate a single item + that concatenates all files/subdirs. + + For a directory with a magic '%' file, we generate a result set + for each item in the directory, and then do a product to generate + a result list with all combinations. + + The final description (after recursion) for each item will look + like a relative path. If there was a % product, that path + component will appear as a file with braces listing the selection + of chosen subitems. + """ + ret = [] + for i in range(generate_from, generate_to): + output = mat.index(i) + ret.append(( + matrix.generate_desc(combine_path, output).replace('.yaml', ''), + matrix.generate_paths(path, output, combine_path))) + return ret + + +def combine_path(left, right): + """ + os.path.join(a, b) doesn't like it when b is None + """ + if right: + return os.path.join(left, right) + return left diff --git a/teuthology/suite/fragment-merge.lua b/teuthology/suite/fragment-merge.lua new file mode 100644 index 0000000000..856bdedb49 --- /dev/null +++ b/teuthology/suite/fragment-merge.lua @@ -0,0 +1,104 @@ +-- allow only some Lua (and lunatic) builtins for use by scripts +local lua_allowlist = { + assert = assert, + error = error, + ipairs = ipairs, + next = next, + pairs = pairs, + tonumber = tonumber, + tostring = tostring, + py_attrgetter = python.as_attrgetter, + py_dict = python.builtins.dict, + py_list = python.builtins.list, + py_tuple = python.builtins.tuple, + py_enumerate = python.enumerate, + py_iterex = python.iterex, + py_itemgetter = python.as_itemgetter, + math = math, +} +lua_allowlist.__index = lua_allowlist + +-- accept a fragment/config (or just return true from the script!) +local function accept() + coroutine.yield(true) +end +-- reject a fragment/config (or just return false from the script!) +local function reject() + coroutine.yield(false) +end +-- this implements logic for filtering (via teuthology-suite CLI flags) +local function matches(_ENV, f) + if description:find(f, 1, true) then + return true + end + if filter_fragments then + for i,path in py_enumerate(base_frag_paths) do + if path:find(f) then + return true + end + end + end +end + +local function check_filters(_ENV) + if filter_all then + for i,f in py_enumerate(filter_all) do + if not matches(_ENV, f) then + reject() + end + end + end + if filter_in then + local found, tried = false, false + for i,f in py_enumerate(filter_in) do + tried = true + if matches(_ENV, f) then + found = true + break + end + end + if tried and not found then + reject() + end + end + if filter_out then + for i,f in py_enumerate(filter_out) do + if matches(_ENV, f) then + reject() + end + end + end +end + +function new_script(script, log, deep_merge, yaml_load) + -- create a restricted sandbox for the script: + local env = setmetatable({ + accept = accept, + deep_merge = deep_merge, + log = log, + reject = reject, + yaml_load = yaml_load, + }, lua_allowlist) + + -- avoid putting check_filters in _ENV + -- try to keep line numbers correct: + local header = [[do local check_filters = ...; accept(); check_filters(_ENV) end local function main() do ]] + local footer = [[ end return true end return main()]] + local function chunks() + coroutine.yield(header) + if #script > 0 then + coroutine.yield(script) + end + coroutine.yield(footer) + end + + -- put the script in a coroutine so we can yield success/failure from + -- anywhere in the script, including in nested function calls. 
+ local f, err = load(coroutine.wrap(chunks), 'teuthology', 't', env) + if f == nil then + error("failure to load script: "..err) + end + f = coroutine.wrap(f) + f(check_filters) + return env, f +end diff --git a/teuthology/suite/matrix.py b/teuthology/suite/matrix.py new file mode 100644 index 0000000000..e713bc4433 --- /dev/null +++ b/teuthology/suite/matrix.py @@ -0,0 +1,388 @@ +import os +import random +import heapq +from math import gcd +from functools import reduce + +def lcm(a, b): + return a*b // gcd(a, b) +def lcml(l): + return reduce(lcm, l) + +class Matrix: + """ + Interface for sets + """ + def size(self): + pass + + def index(self, i): + """ + index() should return a recursive structure represending the paths + to concatenate for index i: + + Result :: (PathSegment, Result) | {Result} + Path :: string + + {Result} is a frozen_set of Results indicating that + the set of paths resulting from each of the contained + Results should be concatenated. (PathSegment, Result) + indicates that PathSegment should be prepended to the + paths resulting from Result. + """ + pass + + def minscanlen(self): + """ + min run require to get a good sample + """ + pass + + def cyclicity(self): + """ + A cyclicity of N means that the set represented by the Matrix + can be chopped into N good subsets of sequential indices. + """ + return self.size() // self.minscanlen() + + def tostr(self, depth): + pass + + def __str__(self): + """ + str method + """ + return self.tostr(0) + + +class Cycle(Matrix): + """ + Run a matrix multiple times + """ + def __init__(self, num, mat): + self.mat = mat + self.num = num + + def size(self): + return self.mat.size() * self.num + + def index(self, i): + return self.mat.index(i % self.mat.size()) + + def minscanlen(self): + return self.mat.minscanlen() + + def tostr(self, depth): + return '\t'*depth + "Cycle({num}):\n".format(num=self.num) + self.mat.tostr(depth + 1) + +# Logically, inverse of Cycle +class Subset(Matrix): + """ + Run a matrix subset. + """ + def __init__(self, mat, divisions, which=None): + self.mat = mat + self.divisions = divisions + if which is None: + self.which = random.randint(0, divisions-1) + else: + assert which < divisions + self.which = which + + def size(self): + return self.mat.size() // self.divisions + + def index(self, i): + i += self.which * self.size() + assert i < self.mat.size() + return self.mat.index(i) + + def minscanlen(self): + return self.mat.minscanlen() + + def tostr(self, depth): + return '\t'*depth + "Subset({num}, {index}):\n".format(num=self.num, index=self.index) + self.mat.tostr(depth + 1) + + +class Base(Matrix): + """ + Just a single item. + """ + def __init__(self, item): + self.item = item + + def size(self): + return 1 + + def index(self, i): + return self.item + + def minscanlen(self): + return 1 + + def tostr(self, depth): + return '\t'*depth + "Base({item})\n".format(item=self.item) + + +class Product(Matrix): + """ + Builds items by taking one item from each submatrix. Contiguous + subsequences should move through all dimensions. 
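[Editor's note: quick sanity checks for the lcm helpers and the Cycle wrapper defined above; the values are arbitrary.]

    from teuthology.suite.matrix import lcm, lcml, Base, Cycle

    assert lcm(4, 6) == 12
    assert lcml([2, 3, 4]) == 12
    m = Cycle(3, Base('foo.yaml'))   # run the single fragment three times
    assert m.size() == 3
    assert m.index(2) == 'foo.yaml'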
+ """ + def __init__(self, item, _submats): + assert len(_submats) > 0, \ + "Product requires child submats to be passed in" + self.item = item + + submats = sorted( + [((i.size(), ind), i) for (i, ind) in + zip(_submats, range(len(_submats)))], reverse=True) + self.submats = [] + self._size = 1 + for ((size, _), submat) in submats: + self.submats.append((self._size, submat)) + self._size *= size + self.submats.reverse() + + self._minscanlen = max([i.minscanlen() for i in _submats]) + if self._minscanlen + 1 > self._size: + self._minscanlen = self._size + else: + self._minscanlen += 1 + + def tostr(self, depth): + ret = '\t'*depth + "Product({item}):\n".format(item=self.item) + return ret + ''.join([i[1].tostr(depth+1) for i in self.submats]) + + def minscanlen(self): + return self._minscanlen + + def size(self): + return self._size + + def _index(self, i, submats): + """ + We recursively reduce the N dimension problem to a two + dimension problem. + + index(i) = (lmat.index(i % lmat.size()), rmat.index(i % + rmat.size())) would simply work if lmat.size() and rmat.size() + are relatively prime. + + In general, if the gcd(lmat.size(), rmat.size()) == N, + index(i) would be periodic on the interval (lmat.size() * + rmat.size()) / N. To adjust, we decrement the lmat index + number on each repeat. Each of the N repeats must therefore + be distinct from the previous ones resulting in lmat.size() * + rmat.size() combinations. + """ + assert len(submats) > 0, \ + "_index requires non-empty submats" + if len(submats) == 1: + return frozenset([submats[0][1].index(i)]) + + lmat = submats[0][1] + lsize = lmat.size() + + rsize = submats[0][0] + + cycles = gcd(rsize, lsize) + clen = (rsize * lsize) // cycles + off = (i // clen) % cycles + + def combine(r, s=frozenset()): + if isinstance(r, frozenset): + return s | r + return s | frozenset([r]) + + litems = lmat.index((i - off) % lmat.size()) + ritems = self._index(i, submats[1:]) + return combine(litems, combine(ritems)) + + def index(self, i): + items = self._index(i, self.submats) + return (self.item, items) + +class Concat(Matrix): + """ + Concatenates all items in child matrices + """ + def __init__(self, item, submats): + self.submats = submats + self.item = item + + def size(self): + return 1 + + def minscanlen(self): + return 1 + + def index(self, i): + out = frozenset() + for submat in self.submats: + for i in range(submat.size()): + out = out | frozenset([submat.index(i)]) + return (self.item, out) + + def tostr(self, depth): + ret = '\t'*depth + "Concat({item}):\n".format(item=self.item) + return ret + ''.join([i.tostr(depth+1) for i in self.submats]) + +class PickRandom(Matrix): + """ + Select a random item from the child matrices. + """ + def __init__(self, item, submats): + self.submats = submats + self.item = item + + def size(self): + return 1 + + def minscanlen(self): + return 1 + + def index(self, i): + indx = random.randint(0, len(self.submats) - 1) + submat = self.submats[indx] + out = frozenset([submat.index(indx)]) + return (self.item, out) + + def tostr(self, depth): + ret = '\t'*depth + "PickRandom({item}):\n".format(item=self.item) + return ret + ''.join([i.tostr(depth+1) for i in self.submats]) + +class Sum(Matrix): + """ + We want to mix the subsequences proportionately to their size. + + The intuition is that we map all of the subsequences uniformly + onto rational numbers in [0, 1). The ith subsequence with length + l will have index k map onto i* + k*(1/l). 
i* + ensures that no two subsequences have an index which shares a + mapping in [0, 1) as long as is chosen to be small + enough. + + Rather than actually dealing with rational numbers, however, we'll + instead map onto whole numbers in [0, pseudo_size) where + pseudo_size is the lcm of the subsequence lengths * the number of + subsequences. Including the number of subsequences in the product + allows us to use 1 as . For each subsequence, we designate + an offset (position in input list) and a multiple (pseudo_size / size) + such that the psuedo_index for index i is + i*. + + I don't have a good way to map index to pseudo index, so we'll + precompute a mapping in the constructor (self._i_so_sis) from + index to (subset_index, subset). + """ + def __init__(self, item, _submats): + assert len(_submats) > 0, \ + f"Sum requires non-empty _submats: {item}" + self.item = item + + self._pseudo_size = lcml((i.size() for i in _submats)) * len(_submats) + self._size = sum((i.size() for i in _submats)) + self._submats = [ + ((i, self._pseudo_size // s.size()), s) for (i, s) in \ + zip(range(len(_submats)), _submats) + ] + + def sm_to_pmsl(offset_multiple_submat): + """ + offset_multiple_submat tuple to pseudo minscanlen + """ + ((offset, multiple), submat) = offset_multiple_submat + + return submat.minscanlen() * multiple + + def index_to_pindex_generator(submats): + assert len(submats) > 0, "submats must be non-empty" + h = [] + for (offset, multiple), submat in submats: + heapq.heappush(h, (offset, 0, multiple, submat)) + while True: + cur, si, multiple, submat = heapq.heappop(h) + heapq.heappush( + h, + (cur + multiple, si + 1, multiple, submat)) + yield si, submat + + self._i_to_sis = dict( + zip(range(self._size), index_to_pindex_generator(self._submats)) + ) + + self._minscanlen = self.pseudo_index_to_index( + max(map(sm_to_pmsl, self._submats))) + + def pi_to_sis(self, pi, offset_multiple): + """ + offset_multiple tuple of offset and multiple + + max(i) s.t. 
offset + i*multiple <= pi + """ + (offset, multiple) = offset_multiple + if pi < offset: + return -1 + return (pi - offset) // multiple + + def pseudo_index_to_index(self, pi): + """ + Count all pseudoindex values <= pi with corresponding subset indices + """ + return sum((self.pi_to_sis(pi, i) + 1 for i, _ in self._submats)) - 1 + + def tostr(self, depth): + ret = '\t'*depth + "Sum({item}):\n".format(item=self.item) + return ret + ''.join([i[1].tostr(depth+1) for i in self._submats]) + + def minscanlen(self): + return self._minscanlen + + def size(self): + return self._size + + def index(self, i): + si, submat = self._i_to_sis[i % self._size] + return (self.item, submat.index(si)) + +def generate_lists(result): + """ + Generates a set of tuples representing paths to concatenate + """ + if isinstance(result, frozenset): + ret = [] + for i in result: + ret.extend(generate_lists(i)) + return frozenset(ret) + elif isinstance(result, tuple): + ret = [] + (item, children) = result + for f in generate_lists(children): + nf = [item] + nf.extend(f) + ret.append(tuple(nf)) + return frozenset(ret) + else: + return frozenset([(result,)]) + + +def generate_paths(path, result, joinf=os.path.join): + """ + Generates from the result set a list of sorted paths to concatenate + """ + return [reduce(joinf, i, path) for i in sorted(generate_lists(result))] + + +def generate_desc(joinf, result): + """ + Generates the text description of the test represented by result + """ + if isinstance(result, frozenset): + ret = sorted([generate_desc(joinf, i) for i in result]) + return '{' + ' '.join(ret) + '}' + elif isinstance(result, tuple): + (item, children) = result + cdesc = generate_desc(joinf, children) + return joinf(str(item), cdesc) + else: + return str(result) diff --git a/teuthology/suite/merge.py b/teuthology/suite/merge.py new file mode 100644 index 0000000000..647fe6e1d5 --- /dev/null +++ b/teuthology/suite/merge.py @@ -0,0 +1,170 @@ +import copy +import logging +import lupa +import os +from types import MappingProxyType +import yaml + +from teuthology.suite.build_matrix import combine_path +from teuthology.suite.util import strip_fragment_path +from teuthology.misc import deep_merge + +log = logging.getLogger(__name__) + +TEUTHOLOGY_TEMPLATE = MappingProxyType({ + "teuthology": { + "fragments_dropped": [], + "meta": MappingProxyType({}), + "postmerge": [], + } +}) + +L = lupa.LuaRuntime() +FRAGMENT_MERGE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fragment-merge.lua") +with open(FRAGMENT_MERGE) as f: + L.execute(f.read()) + +def config_merge(configs, suite_name=None, **kwargs): + """ + This procedure selects and merges YAML fragments for each job in the + configs array generated for the matrix of jobs. + + The primary task here is to run premerge and postmerge scripts specified + with the YAML fragments as part of filtering out jobs or individual YAML + fragments. This is done with Lua scripting (via "lupa", a "lunatic" + derivative). + + A premerge script looks like: + + + teuthology: + premerge: | + if yaml.os_type == 'ubuntu' then reject() end + + + This script runs prior to a YAML fragment merging into the complete YAML + specification for a job. The script has access to the complete YAML + description generated so far as part of merging earlier fragments + (remember: fragments are ordered lexicographically). 
In the above case, the + os_type is checked and the fragment carrying the script (foo.yaml here) is dropped if the job is + configured to run on Ubuntu (note: this does not account for a job's + default os_type, which is not yet known). + + A postmerge script looks like: + + + teuthology: + postmerge: + - if yaml.os_type == "ubuntu" then reject() end + + + This script has the same form but a different effect: if, after combining all + the YAML fragments for a job, the os_type is "ubuntu", then the entire job + is dropped (filtered out / rejected). postmerge scripts are specified + as a list of strings in the teuthology.postmerge array. All of these + strings are concatenated and then executed as a single script, so + postmerge scripts from multiple fragments are all combined. You may use + this to define variables, functions, or anything else you need. + + Scripts have access to the entire yaml object and may do any desired advanced + checks. It is also possible to programmatically change the YAML definition: + + + teuthology: + postmerge: + - | + local attr = py_attrgetter + local tasks = py_list() + for i = 1, 3 do + local task = py_dict( + exec = py_dict(py_list( + py_tuple("mon.a", py_list( + "echo "..i + ) + )) + ) + attr(tasks).append(task) + end + deep_merge(yaml.tasks, tasks) + + + This will be as if the yaml file contained: + + + tasks: + exec: + mon.a: + - echo 1 + exec: + mon.a: + - echo 2 + exec: + mon.a: + - echo 3 + + + These tasks are then merged normally (via deep_merge) after the script is run. + + Scripts run sandboxed, with access to only a small selection of the Lua + builtin libraries. There is also access to some python/lupa specific + functions which are prefixed with "py_". No I/O or other system functions are + permitted. + + The teuthology-suite filtering options are now implemented via builtin + postmerge scripts. Logically, when a filter matches, reject() drops + the entire job (config) from the list.
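As a further illustration of the concatenation behaviour (a hypothetical pair of fragments, not taken from any real suite), one fragment can define a Lua helper in its postmerge list and a later fragment can call it, since all of the strings run as a single script:

        teuthology:
          postmerge:
          - local function on_ubuntu() return yaml.os_type == "ubuntu" end

    and, in a fragment that sorts later:

        teuthology:
          postmerge:
          - if on_ubuntu() then reject() end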
+ """ + + new_script = L.eval('new_script') + yaml_cache = {} + for desc, paths in configs: + log.debug("merging config %s", desc) + + if suite_name is not None: + desc = combine_path(suite_name, desc) + + yaml_complete_obj = {} + deep_merge(yaml_complete_obj, TEUTHOLOGY_TEMPLATE) + for path in paths: + if path not in yaml_cache: + with open(path) as f: + txt = f.read() + yaml_cache[path] = (txt, yaml.safe_load(txt)) + + yaml_fragment_txt, yaml_fragment_obj = yaml_cache[path] + if yaml_fragment_obj is None: + continue + yaml_fragment_obj = copy.deepcopy(yaml_fragment_obj) + premerge = yaml_fragment_obj.get('teuthology', {}).pop('premerge', '') + if premerge: + log.debug("premerge script running:\n%s", premerge) + env, script = new_script(premerge, log, deep_merge, yaml.safe_load) + env['base_frag_paths'] = [strip_fragment_path(x) for x in paths] + env['description'] = desc + env['frag_paths'] = paths + env['suite_name'] = suite_name + env['yaml'] = yaml_complete_obj + env['yaml_fragment'] = yaml_fragment_obj + for k,v in kwargs.items(): + env[k] = v + if not script(): + log.debug("skipping merge of fragment %s due to premerge filter", path) + yaml_complete_obj['teuthology']['fragments_dropped'].append(path) + continue + deep_merge(yaml_complete_obj, yaml_fragment_obj) + + postmerge = yaml_complete_obj.get('teuthology', {}).get('postmerge', []) + postmerge = "\n".join(postmerge) + log.debug("postmerge script running:\n%s", postmerge) + env, script = new_script(postmerge, log, deep_merge, yaml.safe_load) + env['base_frag_paths'] = [strip_fragment_path(x) for x in paths] + env['description'] = desc + env['frag_paths'] = paths + env['suite_name'] = suite_name + env['yaml'] = yaml_complete_obj + for k,v in kwargs.items(): + env[k] = v + if not script(): + log.debug("skipping config %s due to postmerge filter", desc) + continue + yield desc, paths, yaml_complete_obj diff --git a/teuthology/suite/placeholder.py b/teuthology/suite/placeholder.py new file mode 100644 index 0000000000..37f538e6fa --- /dev/null +++ b/teuthology/suite/placeholder.py @@ -0,0 +1,109 @@ +import copy + + +class Placeholder(object): + """ + A placeholder for use with substitute_placeholders. Simply has a 'name' + attribute. + """ + def __init__(self, name): + self.name = name + + +def substitute_placeholders(input_dict, values_dict): + """ + Replace any Placeholder instances with values named in values_dict. In the + case of None values, the key is omitted from the result. + + Searches through nested dicts. + + :param input_dict: A dict which may contain one or more Placeholder + instances as values. + :param values_dict: A dict, with keys matching the 'name' attributes of all + of the Placeholder instances in the input_dict, and + values to be substituted. + :returns: The modified input_dict + """ + input_dict = copy.deepcopy(input_dict) + + def _substitute(input_dict, values_dict): + for key, value in list(input_dict.items()): + if isinstance(value, dict): + _substitute(value, values_dict) + elif isinstance(value, Placeholder): + if values_dict[value.name] is None: + del input_dict[key] + continue + # If there is a Placeholder without a corresponding entry in + # values_dict, we will hit a KeyError - we want this. 
+ input_dict[key] = values_dict[value.name] + return input_dict + + return _substitute(input_dict, values_dict) + + +# Template for the config that becomes the base for each generated job config +dict_templ = { + 'branch': Placeholder('ceph_branch'), + 'sha1': Placeholder('ceph_hash'), + 'teuthology_branch': Placeholder('teuthology_branch'), + 'teuthology_sha1': Placeholder('teuthology_sha1'), + 'archive_upload': Placeholder('archive_upload'), + 'archive_upload_key': Placeholder('archive_upload_key'), + 'machine_type': Placeholder('machine_type'), + 'nuke-on-error': True, + 'os_type': Placeholder('distro'), + 'os_version': Placeholder('distro_version'), + 'overrides': { + 'admin_socket': { + 'branch': Placeholder('ceph_branch'), + }, + 'ceph': { + 'conf': { + 'mon': { + 'debug mon': 20, + 'debug ms': 1, + 'debug paxos': 20}, + 'mgr': { + 'debug mgr': 20, + 'debug ms': 1}, + 'osd': { + 'debug ms': 1, + 'debug osd': 20 + } + }, + 'flavor': Placeholder('flavor'), + 'log-ignorelist': ['\(MDS_ALL_DOWN\)', + '\(MDS_UP_LESS_THAN_MAX\)'], + 'sha1': Placeholder('ceph_hash'), + }, + 'ceph-deploy': { + 'conf': { + 'client': { + 'log file': '/var/log/ceph/ceph-$name.$pid.log' + }, + 'mon': { + 'osd default pool size': 2 + } + } + }, + 'install': { + 'ceph': { + 'sha1': Placeholder('ceph_hash'), + 'flavor': Placeholder('flavor'), + } + }, + 'workunit': { + 'branch': Placeholder('suite_branch'), + 'sha1': Placeholder('suite_hash'), + } + }, + 'repo': Placeholder('ceph_repo'), + 'sleep_before_teardown': 0, + 'suite': Placeholder('suite'), + 'suite_repo': Placeholder('suite_repo'), + 'suite_relpath': Placeholder('suite_relpath'), + 'suite_branch': Placeholder('suite_branch'), + 'suite_sha1': Placeholder('suite_hash'), + 'tasks': [], +} diff --git a/teuthology/suite/run.py b/teuthology/suite/run.py new file mode 100644 index 0000000000..ab2d8f064f --- /dev/null +++ b/teuthology/suite/run.py @@ -0,0 +1,707 @@ +import copy +import logging +import os +import pwd +import yaml +import re +import time + +from humanfriendly import format_timespan + +from datetime import datetime +from tempfile import NamedTemporaryFile +from teuthology import repo_utils + +from teuthology.config import config, JobConfig +from teuthology.exceptions import ( + BranchMismatchError, BranchNotFoundError, CommitNotFoundError, + VersionNotFoundError +) +from teuthology.misc import deep_merge, get_results_url +from teuthology.orchestra.opsys import OS +from teuthology.repo_utils import build_git_url + +from teuthology.suite import util +from teuthology.suite.merge import config_merge +from teuthology.suite.build_matrix import build_matrix +from teuthology.suite.placeholder import substitute_placeholders, dict_templ + +log = logging.getLogger(__name__) + + +class Run(object): + WAIT_MAX_JOB_TIME = 30 * 60 + WAIT_PAUSE = 5 * 60 + __slots__ = ( + 'args', 'name', 'base_config', 'suite_repo_path', 'base_yaml_paths', + 'base_args', 'package_versions', 'kernel_dict', 'config_input', + 'timestamp', 'user', + ) + + def __init__(self, args): + """ + args must be a config.YamlConfig object + """ + self.args = args + # We assume timestamp is a datetime.datetime object + self.timestamp = self.args.timestamp or \ + datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + self.user = self.args.user or pwd.getpwuid(os.getuid()).pw_name + + self.name = self.make_run_name() + + if self.args.ceph_repo: + config.ceph_git_url = self.args.ceph_repo + if self.args.suite_repo: + config.ceph_qa_suite_git_url = self.args.suite_repo + + self.base_config = 
self.create_initial_config() + # caches package versions to minimize requests to gbs + self.package_versions = dict() + + # Interpret any relative paths as being relative to ceph-qa-suite + # (absolute paths are unchanged by this) + self.base_yaml_paths = [os.path.join(self.suite_repo_path, b) for b in + self.args.base_yaml_paths] + + def make_run_name(self): + """ + Generate a run name. A run name looks like: + teuthology-2014-06-23_19:00:37-rados-dumpling-testing-basic-plana + """ + worker = util.get_worker(self.args.machine_type) + return '-'.join( + [ + self.user, + str(self.timestamp), + self.args.suite, + self.args.ceph_branch, + self.args.kernel_branch or '-', + self.args.flavor, worker + ] + ).replace('/', ':') + + def create_initial_config(self): + """ + Put together the config file used as the basis for each job in the run. + Grabs hashes for the latest ceph, kernel and teuthology versions in the + branches specified and specifies them so we know exactly what we're + testing. + + :returns: A JobConfig object + """ + self.kernel_dict = self.choose_kernel() + ceph_hash = self.choose_ceph_hash() + # We don't store ceph_version because we don't use it yet outside of + # logging. + self.choose_ceph_version(ceph_hash) + suite_branch = self.choose_suite_branch() + suite_hash = self.choose_suite_hash(suite_branch) + if self.args.suite_dir: + self.suite_repo_path = self.args.suite_dir + else: + self.suite_repo_path = util.fetch_repos( + suite_branch, test_name=self.name, dry_run=self.args.dry_run) + teuthology_branch, teuthology_sha1 = self.choose_teuthology_branch() + + + if self.args.distro_version: + self.args.distro_version, _ = \ + OS.version_codename(self.args.distro, self.args.distro_version) + self.config_input = dict( + suite=self.args.suite, + suite_branch=suite_branch, + suite_hash=suite_hash, + ceph_branch=self.args.ceph_branch, + ceph_hash=ceph_hash, + ceph_repo=config.get_ceph_git_url(), + teuthology_branch=teuthology_branch, + teuthology_sha1=teuthology_sha1, + machine_type=self.args.machine_type, + distro=self.args.distro, + distro_version=self.args.distro_version, + archive_upload=config.archive_upload, + archive_upload_key=config.archive_upload_key, + suite_repo=config.get_ceph_qa_suite_git_url(), + suite_relpath=self.args.suite_relpath, + flavor=self.args.flavor, + ) + return self.build_base_config() + + def choose_kernel(self): + # Put together a stanza specifying the kernel hash + if self.args.kernel_branch == 'distro': + kernel_hash = 'distro' + # Skip the stanza if '-k none' is given + elif self.args.kernel_branch is None or \ + self.args.kernel_branch.lower() == 'none': + kernel_hash = None + else: + kernel_hash = util.get_gitbuilder_hash( + 'kernel', self.args.kernel_branch, 'default', + self.args.machine_type, self.args.distro, + self.args.distro_version, + ) + if not kernel_hash: + util.schedule_fail( + "Kernel branch '{branch}' not found".format( + branch=self.args.kernel_branch), + dry_run=self.args.dry_run, + ) + if kernel_hash: + log.info("kernel sha1: {hash}".format(hash=kernel_hash)) + kernel_dict = dict(kernel=dict(kdb=True, sha1=kernel_hash)) + if kernel_hash != 'distro': + kernel_dict['kernel']['flavor'] = 'default' + else: + kernel_dict = dict() + return kernel_dict + + def choose_ceph_hash(self): + """ + Get the ceph hash: if --sha1/-S is supplied, use it if it is valid, and + just keep the ceph_branch around. Otherwise use the current git branch + tip. 
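For example (hypothetical sha1; remaining options elided), a run can be pinned to an exact commit while keeping the branch name for bookkeeping:

        teuthology-suite --ceph main --sha1 1234abcd --suite rados ...

    This is a usage sketch only; whether the supplied sha1 is validated against the repo is controlled by the validate_sha1 argument handled below.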
+ """ + repo_name = self.ceph_repo_name + + if self.args.ceph_sha1: + ceph_hash = self.args.ceph_sha1 + if self.args.validate_sha1: + ceph_hash = util.git_validate_sha1(repo_name, ceph_hash) + if not ceph_hash: + exc = CommitNotFoundError( + self.args.ceph_sha1, + '%s.git' % repo_name + ) + util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run) + log.info("ceph sha1 explicitly supplied") + + elif self.args.ceph_branch: + ceph_hash = util.git_ls_remote( + self.args.ceph_repo, self.args.ceph_branch) + if not ceph_hash: + exc = BranchNotFoundError( + self.args.ceph_branch, + '%s.git' % repo_name + ) + util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run) + + log.info("ceph sha1: {hash}".format(hash=ceph_hash)) + return ceph_hash + + def choose_ceph_version(self, ceph_hash): + if config.suite_verify_ceph_hash and not self.args.newest: + # don't bother if newest; we'll search for an older one + # Get the ceph package version + try: + ceph_version = util.package_version_for_hash( + ceph_hash, self.args.flavor, self.args.distro, + self.args.distro_version, self.args.machine_type, + ) + except Exception as exc: + util.schedule_fail(str(exc), self.name, dry_run=self.args.dry_run) + log.info("ceph version: {ver}".format(ver=ceph_version)) + return ceph_version + else: + log.info('skipping ceph package verification') + + def choose_teuthology_branch(self): + """Select teuthology branch, check if it is present in repo and return + tuple (branch, hash) where hash is commit sha1 corresponding + to the HEAD of the branch. + + The branch name value is determined in the following order: + + Use ``--teuthology-branch`` argument value if supplied. + + Use ``TEUTH_BRANCH`` environment variable value if declared. + + If file ``qa/.teuthology_branch`` can be found in the suite repo + supplied with ``--suite-repo`` or ``--suite-dir`` and contains + non-empty string then use it as the branch name. + + Use ``teuthology_branch`` value if it is set in the one + of the teuthology config files ``$HOME/teuthology.yaml`` + or ``/etc/teuthology.yaml`` correspondingly. + + Use ``main``. + + Generate exception if the branch is not present in the repo. 
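For instance (hypothetical branch name, other options elided), the environment variable overrides both the qa/.teuthology_branch file and any teuthology.yaml setting without editing anything on disk:

        TEUTH_BRANCH=wip-my-fix teuthology-suite --suite rados ...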
+ + """ + teuthology_branch = self.args.teuthology_branch + if not teuthology_branch: + teuthology_branch = os.environ.get('TEUTH_BRANCH', None) + if not teuthology_branch: + branch_file_path = self.suite_repo_path + '/qa/.teuthology_branch' + log.debug('Check file %s exists', branch_file_path) + if os.path.exists(branch_file_path): + log.debug('Found teuthology branch config file %s', + branch_file_path) + with open(branch_file_path) as f: + teuthology_branch = f.read().strip() + if teuthology_branch: + log.debug( + 'The teuthology branch is overridden with %s', + teuthology_branch) + else: + log.warning( + 'The teuthology branch config is empty, skipping') + if not teuthology_branch: + teuthology_branch = config.get('teuthology_branch') + + if config.teuthology_path: + actual_branch = repo_utils.current_branch(config.teuthology_path) + if teuthology_branch and actual_branch != teuthology_branch: + raise BranchMismatchError( + teuthology_branch, + config.teuthology_path, + "config.teuthology_path is set", + ) + if not teuthology_branch: + teuthology_branch = actual_branch + teuthology_sha1 = util.git_ls_remote( + f"file://{config.teuthology_path}", + teuthology_branch + ) + else: + if not teuthology_branch: + teuthology_branch = 'main' + teuthology_sha1 = util.git_ls_remote( + 'teuthology', + teuthology_branch + ) + if not teuthology_sha1: + exc = BranchNotFoundError(teuthology_branch, build_git_url('teuthology')) + util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run) + log.info("teuthology branch: %s %s", teuthology_branch, teuthology_sha1) + return teuthology_branch, teuthology_sha1 + + @property + def ceph_repo_name(self): + if self.args.ceph_repo: + return self._repo_name(self.args.ceph_repo) + else: + return 'ceph' + + @property + def suite_repo_name(self): + if self.args.suite_repo: + return self._repo_name(self.args.suite_repo) + else: + return 'ceph-qa-suite' + + @staticmethod + def _repo_name(url): + return re.sub('\.git$', '', url.split('/')[-1]) + + def choose_suite_branch(self): + suite_repo_name = self.suite_repo_name + suite_repo_project_or_url = self.args.suite_repo or 'ceph-qa-suite' + suite_branch = self.args.suite_branch + ceph_branch = self.args.ceph_branch + if suite_branch and suite_branch != 'main': + if not util.git_branch_exists( + suite_repo_project_or_url, + suite_branch + ): + exc = BranchNotFoundError(suite_branch, suite_repo_name) + util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run) + elif not suite_branch: + # Decide what branch of the suite repo to use + if util.git_branch_exists(suite_repo_project_or_url, ceph_branch): + suite_branch = ceph_branch + else: + log.info( + "branch {0} not in {1}; will use main for" + " ceph-qa-suite".format( + ceph_branch, + suite_repo_name + )) + suite_branch = 'main' + return suite_branch + + def choose_suite_hash(self, suite_branch): + suite_repo_name = self.suite_repo_name + suite_repo_project_or_url = self.args.suite_repo or 'ceph-qa-suite' + suite_hash = util.git_ls_remote( + suite_repo_project_or_url, + suite_branch + ) + if not suite_hash: + exc = BranchNotFoundError(suite_branch, suite_repo_name) + util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run) + log.info("%s branch: %s %s", suite_repo_name, suite_branch, suite_hash) + return suite_hash + + def build_base_config(self): + conf_dict = substitute_placeholders(dict_templ, self.config_input) + conf_dict.update(self.kernel_dict) + job_config = JobConfig.from_dict(conf_dict) + 
job_config.name = self.name + job_config.user = self.user + job_config.timestamp = self.timestamp + job_config.priority = self.args.priority + if self.args.email: + job_config.email = self.args.email + if self.args.owner: + job_config.owner = self.args.owner + if self.args.sleep_before_teardown: + job_config.sleep_before_teardown = int(self.args.sleep_before_teardown) + if self.args.rocketchat: + job_config.rocketchat = self.args.rocketchat + return job_config + + def build_base_args(self): + base_args = [ + '--name', self.name, + '--worker', util.get_worker(self.args.machine_type), + ] + if self.args.dry_run: + base_args.append('--dry-run') + if self.args.priority is not None: + base_args.extend(['--priority', str(self.args.priority)]) + if self.args.verbose: + base_args.append('-v') + if self.args.owner: + base_args.extend(['--owner', self.args.owner]) + if self.args.queue_backend: + base_args.extend(['--queue-backend', self.args.queue_backend]) + return base_args + + + def write_rerun_memo(self): + args = copy.deepcopy(self.base_args) + args.append('--first-in-suite') + if self.args.subset: + subset = '/'.join(str(i) for i in self.args.subset) + args.extend(['--subset', subset]) + if self.args.no_nested_subset: + args.extend(['--no-nested-subset']) + args.extend(['--seed', str(self.args.seed)]) + util.teuthology_schedule( + args=args, + dry_run=self.args.dry_run, + verbose=self.args.verbose, + log_prefix="Memo: ") + + + def write_result(self): + arg = copy.deepcopy(self.base_args) + arg.append('--last-in-suite') + if self.base_config.email: + arg.extend(['--email', self.base_config.email]) + if self.args.timeout: + arg.extend(['--timeout', self.args.timeout]) + util.teuthology_schedule( + args=arg, + dry_run=self.args.dry_run, + verbose=self.args.verbose, + log_prefix="Results: ") + results_url = get_results_url(self.base_config.name) + if results_url: + log.info("Test results viewable at %s", results_url) + + + def prepare_and_schedule(self): + """ + Puts together some "base arguments" with which to execute + teuthology-schedule for each job, then passes them and other parameters + to schedule_suite(). Finally, schedules a "last-in-suite" job that + sends an email to the specified address (if one is configured). 
+ """ + self.base_args = self.build_base_args() + + # Make sure the yaml paths are actually valid + for yaml_path in self.base_yaml_paths: + full_yaml_path = os.path.join(self.suite_repo_path, yaml_path) + if not os.path.exists(full_yaml_path): + raise IOError("File not found: " + full_yaml_path) + + num_jobs = self.schedule_suite() + + if num_jobs: + self.write_result() + + def collect_jobs(self, arch, configs, newest=False, limit=0): + jobs_to_schedule = [] + jobs_missing_packages = [] + for description, fragment_paths, parsed_yaml in configs: + if limit > 0 and len(jobs_to_schedule) >= limit: + log.info( + 'Stopped after {limit} jobs due to --limit={limit}'.format( + limit=limit)) + break + + os_type = parsed_yaml.get('os_type') or self.base_config.os_type + os_version = parsed_yaml.get('os_version') or self.base_config.os_version + exclude_arch = parsed_yaml.get('exclude_arch') + exclude_os_type = parsed_yaml.get('exclude_os_type') + + if exclude_arch and exclude_arch == arch: + log.info('Skipping due to excluded_arch: %s facets %s', + exclude_arch, description) + continue + if exclude_os_type and exclude_os_type == os_type: + log.info('Skipping due to excluded_os_type: %s facets %s', + exclude_os_type, description) + continue + + arg = copy.deepcopy(self.base_args) + arg.extend([ + '--num', str(self.args.num), + '--description', description, + '--', + ]) + arg.extend(self.base_yaml_paths) + + parsed_yaml_txt = yaml.dump(parsed_yaml) + arg.append('-') + + job = dict( + yaml=parsed_yaml, + desc=description, + sha1=self.base_config.sha1, + args=arg, + stdin=parsed_yaml_txt, + ) + + sha1 = self.base_config.sha1 + if parsed_yaml.get('verify_ceph_hash', + config.suite_verify_ceph_hash): + full_job_config = copy.deepcopy(self.base_config.to_dict()) + deep_merge(full_job_config, parsed_yaml) + flavor = util.get_install_task_flavor(full_job_config) + # Get package versions for this sha1, os_type and flavor. If + # we've already retrieved them in a previous loop, they'll be + # present in package_versions and gitbuilder will not be asked + # again for them. 
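The caching described in the comment above amounts to simple memoization; a minimal, hypothetical sketch of the idea (the real cache shape is defined by util.get_package_versions and may differ) is:

        def cached_package_version(cache, sha1, os_type, os_version, flavor, query):
            # Ask the build service only on a cache miss; later jobs with
            # the same facets reuse the stored answer.
            key = (sha1, os_type, os_version, flavor)
            if key not in cache:
                cache[key] = query(sha1, os_type, os_version, flavor)
            return cache[key]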
+ try: + self.package_versions = util.get_package_versions( + sha1, + os_type, + os_version, + flavor, + self.package_versions + ) + except VersionNotFoundError: + pass + if not util.has_packages_for_distro( + sha1, os_type, os_version, flavor, self.package_versions + ): + m = "Packages for os_type '{os}', flavor {flavor} and " + \ + "ceph hash '{ver}' not found" + log.error(m.format(os=os_type, flavor=flavor, ver=sha1)) + jobs_missing_packages.append(job) + # optimization: one missing package causes backtrack in newest mode; + # no point in continuing the search + if newest: + return jobs_missing_packages, None + + jobs_to_schedule.append(job) + return jobs_missing_packages, jobs_to_schedule + + def schedule_jobs(self, jobs_missing_packages, jobs_to_schedule, name): + for job in jobs_to_schedule: + log.info( + 'Scheduling %s', job['desc'] + ) + + log_prefix = '' + if job in jobs_missing_packages: + log_prefix = "Missing Packages: " + if ( + not self.args.dry_run and + not config.suite_allow_missing_packages + ): + util.schedule_fail( + "At least one job needs packages that don't exist for " + "hash {sha1}.".format(sha1=self.base_config.sha1), + name, + dry_run=self.args.dry_run, + ) + util.teuthology_schedule( + args=job['args'], + dry_run=self.args.dry_run, + verbose=self.args.verbose, + log_prefix=log_prefix, + stdin=job['stdin'], + ) + throttle = self.args.throttle + if not self.args.dry_run and throttle: + log.info("pause between jobs : --throttle " + str(throttle)) + time.sleep(int(throttle)) + + def check_priority(self, jobs_to_schedule): + priority = self.args.priority + msg=f'''Unable to schedule {jobs_to_schedule} jobs with priority {priority}. + +Use the following testing priority +10 to 49: Tests which are urgent and blocking other important development. +50 to 74: Testing a particular feature/fix with less than 25 jobs and can also be used for urgent release testing. +75 to 99: Tech Leads usually schedule integration tests with this priority to verify pull requests against main. +100 to 149: QE validation of point releases. +150 to 199: Testing a particular feature/fix with less than 100 jobs and results will be available in a day or so. +200 to 1000: Large test runs that can be done over the course of a week. +Note: To force run, use --force-priority''' + if priority < 50: + util.schedule_fail(msg, dry_run=self.args.dry_run) + elif priority < 75 and jobs_to_schedule > 25: + util.schedule_fail(msg, dry_run=self.args.dry_run) + elif priority < 150 and jobs_to_schedule > 100: + util.schedule_fail(msg, dry_run=self.args.dry_run) + + def check_num_jobs(self, jobs_to_schedule): + """ + Fail schedule if number of jobs exceeds job threshold. + """ + threshold = self.args.job_threshold + msg=f'''Unable to schedule {jobs_to_schedule} jobs, too many jobs, when maximum {threshold} jobs allowed. + +Note: If you still want to go ahead, use --job-threshold 0''' + if threshold and jobs_to_schedule > threshold: + util.schedule_fail(msg, dry_run=self.args.dry_run) + + def schedule_suite(self): + """ + Schedule the suite-run. Returns the number of jobs scheduled. 
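The subset, no_nested_subset and seed values used below come straight from the command line; a typical (hypothetical) invocation that schedules only one deterministic slice of the generated matrix looks like:

        teuthology-suite --suite rados --subset 3/16 --seed 42 ...

    Here 3/16 requests roughly one sixteenth of the jobs (slice index 3), and the fixed seed is intended to keep the random facet choices reproducible across reruns.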
+ """ + name = self.name + if self.args.arch: + arch = self.args.arch + log.debug("Using '%s' as an arch" % arch) + else: + arch = util.get_arch(self.base_config.machine_type) + suite_name = self.base_config.suite + suite_path = os.path.normpath(os.path.join( + self.suite_repo_path, + self.args.suite_relpath, + 'suites', + self.base_config.suite.replace(':', '/'), + )) + log.debug('Suite %s in %s' % (suite_name, suite_path)) + log.debug(f"subset = {self.args.subset}") + log.debug(f"no_nested_subset = {self.args.no_nested_subset}") + configs = build_matrix(suite_path, + subset=self.args.subset, + no_nested_subset=self.args.no_nested_subset, + seed=self.args.seed) + generated = len(configs) + log.info(f'Suite {suite_name} in {suite_path} generated {generated} jobs (not yet filtered or merged)') + configs = list(config_merge(configs, + filter_in=self.args.filter_in, + filter_out=self.args.filter_out, + filter_all=self.args.filter_all, + filter_fragments=self.args.filter_fragments, + suite_name=suite_name)) + + if self.args.dry_run: + log.debug("Base job config:\n%s" % self.base_config) + + # create, but do not write, the temp file here, so it can be + # added to the args in collect_jobs, but not filled until + # any backtracking is done + base_yaml_path = NamedTemporaryFile( + prefix='schedule_suite_', delete=False + ).name + self.base_yaml_paths.insert(0, base_yaml_path) + + # compute job limit in respect of --sleep-before-teardown + job_limit = self.args.limit or 0 + sleep_before_teardown = int(self.args.sleep_before_teardown or 0) + if sleep_before_teardown: + if job_limit == 0: + log.warning('The --sleep-before-teardown option was provided: ' + 'only 1 job will be scheduled. ' + 'Use --limit to run more jobs') + # give user a moment to read this warning + time.sleep(5) + job_limit = 1 + elif self.args.non_interactive: + log.warning( + 'The --sleep-before-teardown option is active. ' + 'There will be a maximum {} jobs running ' + 'which will fall asleep for {}' + .format(job_limit, format_timespan(sleep_before_teardown))) + elif job_limit > 4: + are_you_insane=( + 'There are {total} configs and {maximum} job limit is used. ' + 'Do you really want to lock all machines needed for ' + 'this run for {that_long}? 
(y/N):' + .format( + that_long=format_timespan(sleep_before_teardown), + total=generated, + maximum=job_limit)) + while True: + insane=(input(are_you_insane) or 'n').lower() + if insane == 'y': + break + elif insane == 'n': + exit(0) + + # if newest, do this until there are no missing packages + # if not, do it once + backtrack = 0 + limit = self.args.newest + while backtrack <= limit: + jobs_missing_packages, jobs_to_schedule = \ + self.collect_jobs(arch, configs, self.args.newest, job_limit) + if jobs_missing_packages and self.args.newest: + new_sha1 = \ + util.find_git_parent('ceph', self.base_config.sha1) + if new_sha1 is None: + util.schedule_fail('Backtrack for --newest failed', name, dry_run=self.args.dry_run) + # rebuild the base config to resubstitute sha1 + self.config_input['ceph_hash'] = new_sha1 + self.base_config = self.build_base_config() + backtrack += 1 + continue + if backtrack: + log.info("--newest supplied, backtracked %d commits to %s" % + (backtrack, self.base_config.sha1)) + break + else: + if self.args.newest: + util.schedule_fail( + 'Exceeded %d backtracks; raise --newest value' % limit, + name, + dry_run=self.args.dry_run, + ) + + if self.args.dry_run: + log.debug("Base job config:\n%s" % self.base_config) + + with open(base_yaml_path, 'w+b') as base_yaml: + base_yaml.write(str(self.base_config).encode()) + + if jobs_to_schedule: + self.write_rerun_memo() + + # Before scheduling jobs, check the priority + if self.args.priority and jobs_to_schedule and not self.args.force_priority: + self.check_priority(len(jobs_to_schedule)) + + self.check_num_jobs(len(jobs_to_schedule)) + + self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name) + + os.remove(base_yaml_path) + + count = len(jobs_to_schedule) + missing_count = len(jobs_missing_packages) + total_count = count + if self.args.num: + total_count *= self.args.num + log.info( + 'Suite %s in %s scheduled %d jobs.' % + (suite_name, suite_path, count) + ) + log.info('%d/%d jobs were filtered out.', + (generated - count), + generated) + if missing_count: + log.warning('Scheduled %d/%d jobs that are missing packages!', + missing_count, count) + log.info('Scheduled %d jobs in total.', total_count) + return count diff --git a/teuthology/suite/test/suites/noop/noop.yaml b/teuthology/suite/test/suites/noop/noop.yaml new file mode 100644 index 0000000000..fb674b1b13 --- /dev/null +++ b/teuthology/suite/test/suites/noop/noop.yaml @@ -0,0 +1,7 @@ +roles: +- - mon.a + - osd.0 +tasks: +- exec: + mon.a: + - echo "Well done !" 
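The noop suite above is the fixture exercised by the scheduling tests added later in this patch (teuthology/suite/test/test_init.py). A dry-run invocation roughly equivalent to what those tests drive programmatically would be (usage sketch only; machine type copied from the tests):

    teuthology-suite --ceph main --suite noop \
        --suite-dir teuthology/suite/test --suite-relpath '' \
        --machine-type burnupi --dry-run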
diff --git a/teuthology/suite/test/test_build_matrix.py b/teuthology/suite/test/test_build_matrix.py new file mode 100644 index 0000000000..d7d2c051be --- /dev/null +++ b/teuthology/suite/test/test_build_matrix.py @@ -0,0 +1,815 @@ +import os +import random + +from mock import patch, MagicMock + +from teuthology.suite import build_matrix +from teuthology.test.fake_fs import make_fake_fstools + + +class TestBuildMatrixSimple(object): + def test_combine_path(self): + result = build_matrix.combine_path("/path/to/left", "right/side") + assert result == "/path/to/left/right/side" + + def test_combine_path_no_right(self): + result = build_matrix.combine_path("/path/to/left", None) + assert result == "/path/to/left" + + +class TestBuildMatrix(object): + + patchpoints = [ + 'os.path.exists', + 'os.listdir', + 'os.path.isfile', + 'os.path.isdir', + 'builtins.open', + ] + + def setup(self): + self.mocks = dict() + self.patchers = dict() + for ppoint in self.__class__.patchpoints: + self.mocks[ppoint] = MagicMock() + self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint]) + + def start_patchers(self, fake_fs): + fake_fns = make_fake_fstools(fake_fs) + # N.B.: relies on fake_fns being in same order as patchpoints + for ppoint, fn in zip(self.__class__.patchpoints, fake_fns): + self.mocks[ppoint].side_effect = fn + self.patchers[ppoint].start() + + def stop_patchers(self): + for patcher in self.patchers.values(): + patcher.stop() + + def teardown(self): + self.patchers.clear() + self.mocks.clear() + + def fragment_occurences(self, jobs, fragment): + # What fraction of jobs contain fragment? + count = 0 + for (description, fragment_list) in jobs: + for item in fragment_list: + if item.endswith(fragment): + count += 1 + return count / float(len(jobs)) + + def test_concatenate_1x2x3(self): + fake_fs = { + 'd0_0': { + '+': None, + 'd1_0': { + 'd1_0_0.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 1 + + def test_convolve_2x2(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 4 + assert self.fragment_occurences(result, 'd1_1_1.yaml') == 0.5 + + def test_convolve_2x2x2(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 8 + assert self.fragment_occurences(result, 'd1_2_0.yaml') == 0.5 + + def test_convolve_1x2x4(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + 'd1_2_3.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 8 + assert 
self.fragment_occurences(result, 'd1_2_2.yaml') == 0.25 + + def test_convolve_with_concat(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + '+': None, + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + 'd1_2_3.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 2 + for i in result: + assert 'd0_0/d1_2/d1_2_0.yaml' in i[1] + assert 'd0_0/d1_2/d1_2_1.yaml' in i[1] + assert 'd0_0/d1_2/d1_2_2.yaml' in i[1] + assert 'd0_0/d1_2/d1_2_3.yaml' in i[1] + + def test_convolve_nested(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + '%': '2', + 'd1_0_1': { + 'd1_0_1_0.yaml': None, + 'd1_0_1_1.yaml': None, + }, + 'd1_0_2': { + 'd1_0_2_0.yaml': None, + 'd1_0_2_1.yaml': None, + }, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + 'd1_2_3.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 8 + assert self.fragment_occurences(result, 'd1_0_0.yaml') == 1 + assert self.fragment_occurences(result, 'd1_0_1_0.yaml') == 0.5 + assert self.fragment_occurences(result, 'd1_0_1_1.yaml') == 0.5 + assert self.fragment_occurences(result, 'd1_0_2_0.yaml') == 0.5 + assert self.fragment_occurences(result, 'd1_0_2_1.yaml') == 0.5 + assert self.fragment_occurences(result, 'd1_2_0.yaml') == 0.25 + assert self.fragment_occurences(result, 'd1_2_1.yaml') == 0.25 + assert self.fragment_occurences(result, 'd1_2_2.yaml') == 0.25 + assert self.fragment_occurences(result, 'd1_2_3.yaml') == 0.25 + + + def test_random_dollar_sign_2x2x3(self): + fake_fs = { + 'd0_0': { + '$': None, + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + }, + }, + } + fake_fs1 = { + 'd0_0$': { + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 1 + self.start_patchers(fake_fs1) + try: + result = build_matrix.build_matrix('d0_0$') + finally: + self.stop_patchers() + assert len(result) == 1 + + def test_random_dollar_sign_with_concat(self): + fake_fs = { + 'd0_0': { + '$': None, + 'd1_0': { + 'd1_0_0.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + '+': None, + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + 'd1_2_3.yaml': None, + }, + }, + } + fake_fs1 = { + 'd0_0$': { + 'd1_0': { + 'd1_0_0.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + '+': None, + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + 'd1_2_3.yaml': None, + }, + }, + } + for fs, root in [(fake_fs,'d0_0'), (fake_fs1,'d0_0$')]: + self.start_patchers(fs) + try: + result = build_matrix.build_matrix(root) + finally: + self.stop_patchers() + assert len(result) == 1 + if result[0][0][1:].startswith('d1_2'): + for i in result: + assert os.path.join(root, 'd1_2/d1_2_0.yaml') in i[1] + 
assert os.path.join(root, 'd1_2/d1_2_1.yaml') in i[1] + assert os.path.join(root, 'd1_2/d1_2_2.yaml') in i[1] + assert os.path.join(root, 'd1_2/d1_2_3.yaml') in i[1] + + def test_random_dollar_sign_with_convolve(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2': { + '$': None, + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 4 + fake_fs1 = { + 'd0_0': { + '%': None, + 'd1_0': { + 'd1_0_0.yaml': None, + 'd1_0_1.yaml': None, + }, + 'd1_1': { + 'd1_1_0.yaml': None, + 'd1_1_1.yaml': None, + }, + 'd1_2$': { + 'd1_2_0.yaml': None, + 'd1_2_1.yaml': None, + 'd1_2_2.yaml': None, + }, + }, + } + self.start_patchers(fake_fs1) + try: + result = build_matrix.build_matrix('d0_0') + finally: + self.stop_patchers() + assert len(result) == 4 + + def test_emulate_teuthology_noceph(self): + fake_fs = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + 'teuthology.yaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + assert len(result) == 11 + assert self.fragment_occurences(result, 'vps.yaml') == 1 / 11.0 + + def test_empty_dirs(self): + fake_fs = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + 'teuthology.yaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + + fake_fs2 = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'empty': {}, + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + 'teuthology.yaml': None, + }, + 'empty': {}, + }, + }, + } + self.start_patchers(fake_fs2) + try: + result2 = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + assert len(result) == 11 + assert len(result2) == len(result) + + def test_hidden(self): + fake_fs = { + 'teuthology': { + 'no-ceph': { + '%': None, + '.qa': None, + 'clusters': { + 'single.yaml': None, + '.qa': None, + }, + 'distros': { + '.qa': None, + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, 
+ 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + '.qa': None, + 'teuthology.yaml': None, + }, + '.foo': { + '.qa': None, + 'teuthology.yaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + + fake_fs2 = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + 'teuthology.yaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs2) + try: + result2 = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + assert len(result) == 11 + assert len(result2) == len(result) + + def test_disable_extension(self): + fake_fs = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + }, + 'tasks': { + 'teuthology.yaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + + fake_fs2 = { + 'teuthology': { + 'no-ceph': { + '%': None, + 'clusters': { + 'single.yaml': None, + }, + 'distros': { + 'baremetal.yaml': None, + 'rhel7.0.yaml': None, + 'ubuntu12.04.yaml': None, + 'ubuntu14.04.yaml': None, + 'vps.yaml': None, + 'vps_centos6.5.yaml': None, + 'vps_debian7.yaml': None, + 'vps_rhel6.4.yaml': None, + 'vps_rhel6.5.yaml': None, + 'vps_rhel7.0.yaml': None, + 'vps_ubuntu14.04.yaml': None, + 'forcefilevps_ubuntu14.04.yaml.disable': None, + 'forcefilevps_ubuntu14.04.yaml.anotherextension': None, + }, + 'tasks': { + 'teuthology.yaml': None, + 'forcefilevps_ubuntu14.04notyaml': None, + }, + 'forcefilevps_ubuntu14.04notyaml': None, + 'tasks.disable': { + 'teuthology2.yaml': None, + 'forcefilevps_ubuntu14.04notyaml': None, + }, + }, + }, + } + self.start_patchers(fake_fs2) + try: + result2 = build_matrix.build_matrix('teuthology/no-ceph') + finally: + self.stop_patchers() + assert len(result) == 11 + assert len(result2) == len(result) + + def test_sort_order(self): + # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml + # fragments are sorted. 
+ fake_fs = { + 'thrash': { + '%': None, + 'ceph-thrash': {'default.yaml': None}, + 'ceph': {'base.yaml': None}, + 'clusters': {'mds-1active-1standby.yaml': None}, + 'debug': {'mds_client.yaml': None}, + 'fs': {'btrfs.yaml': None}, + 'msgr-failures': {'none.yaml': None}, + 'overrides': {'allowlist_wrongly_marked_down.yaml': None}, + 'tasks': {'cfuse_workunit_suites_fsstress.yaml': None}, + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('thrash') + finally: + self.stop_patchers() + assert len(result) == 1 + assert self.fragment_occurences(result, 'base.yaml') == 1 + fragments = result[0][1] + assert fragments[0] == 'thrash/ceph/base.yaml' + assert fragments[1] == 'thrash/ceph-thrash/default.yaml' + +class TestSubset(object): + patchpoints = [ + 'os.path.exists', + 'os.listdir', + 'os.path.isfile', + 'os.path.isdir', + 'builtins.open', + ] + + def setup(self): + self.mocks = dict() + self.patchers = dict() + for ppoint in self.__class__.patchpoints: + self.mocks[ppoint] = MagicMock() + self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint]) + + def start_patchers(self, fake_fs): + fake_fns = make_fake_fstools(fake_fs) + # N.B.: relies on fake_fns being in same order as patchpoints + for ppoint, fn in zip(self.__class__.patchpoints, fake_fns): + self.mocks[ppoint].side_effect = fn + self.patchers[ppoint].start() + + def stop_patchers(self): + for patcher in self.patchers.values(): + patcher.stop() + + def teardown(self): + self.patchers.clear() + self.mocks.clear() + + MAX_FACETS = 10 + MAX_FANOUT = 3 + MAX_DEPTH = 3 + MAX_SUBSET = 10 + @staticmethod + def generate_fake_fs(max_facets, max_fanout, max_depth): + def yamilify(name): + return name + ".yaml" + def name_generator(): + x = 0 + while True: + yield(str(x)) + x += 1 + def generate_tree( + max_facets, max_fanout, max_depth, namegen, top=True): + if max_depth == 0: + return None + if max_facets == 0: + return None + items = random.choice(range(max_fanout)) + if items == 0 and top: + items = 1 + if items == 0: + return None + sub_max_facets = max_facets / items + tree = {} + for i in range(items): + subtree = generate_tree( + sub_max_facets, max_fanout, + max_depth - 1, namegen, top=False) + if subtree is not None: + tree['d' + next(namegen)] = subtree + else: + tree[yamilify('f' + next(namegen))] = None + random.choice([ + lambda: tree.update({'%': None}), + lambda: None])() + return tree + return { + 'root': generate_tree( + max_facets, max_fanout, max_depth, name_generator()) + } + + @staticmethod + def generate_subset(maxsub): + divisions = random.choice(range(maxsub-1))+1 + return (random.choice(range(divisions)), divisions) + + @staticmethod + def generate_description_list(tree, subset): + mat, first, matlimit = build_matrix._get_matrix( + 'root', subset=subset) + return [i[0] for i in build_matrix.generate_combinations( + 'root', mat, first, matlimit)], mat, first, matlimit + + @staticmethod + def verify_facets(tree, description_list, subset, mat, first, matlimit): + def flatten(tree): + for k,v in tree.items(): + if v is None and '.yaml' in k: + yield k + elif v is not None and '.disable' not in k: + for x in flatten(v): + yield x + + def pptree(tree, tabs=0): + ret = "" + for k, v in tree.items(): + if v is None: + ret += ('\t'*tabs) + k.ljust(10) + "\n" + else: + ret += ('\t'*tabs) + (k + ':').ljust(10) + "\n" + ret += pptree(v, tabs+1) + return ret + def deyamlify(name): + if name.endswith('.yaml'): + return name[:-5] + else: + return name + for facet in (deyamlify(_) for _ in 
flatten(tree)): + found = False + for i in description_list: + if facet in i: + found = True + break + if not found: + print("tree\n{tree}\ngenerated list\n{desc}\n\nfrom matrix\n\n{matrix}\nsubset {subset} without facet {fac}".format( + tree=pptree(tree), + desc='\n'.join(description_list), + subset=subset, + matrix=str(mat), + fac=facet)) + all_desc = build_matrix.generate_combinations( + 'root', + mat, + 0, + mat.size()) + for i, desc in zip(range(mat.size()), all_desc): + if i == first: + print('==========') + print("{} {}".format(i, desc)) + if i + 1 == matlimit: + print('==========') + assert found + + def test_random(self): + for i in range(10000): + tree = self.generate_fake_fs( + self.MAX_FACETS, + self.MAX_FANOUT, + self.MAX_DEPTH) + subset = self.generate_subset(self.MAX_SUBSET) + self.start_patchers(tree) + try: + dlist, mat, first, matlimit = self.generate_description_list(tree, subset) + finally: + self.stop_patchers() + self.verify_facets(tree, dlist, subset, mat, first, matlimit) diff --git a/teuthology/suite/test/test_init.py b/teuthology/suite/test/test_init.py new file mode 100644 index 0000000000..e477a25c1e --- /dev/null +++ b/teuthology/suite/test/test_init.py @@ -0,0 +1,266 @@ +import os + +from copy import deepcopy + +from mock import patch, Mock, DEFAULT + +from teuthology import suite +from scripts.suite import main +from teuthology.config import config + +import pytest +import time + +from teuthology.exceptions import ScheduleFailError + +def get_fake_time_and_sleep(): + # Below we set m_time.side_effect, but we also set m_time.return_value. + # The reason for this is that we need to store a 'fake time' that + # increments when m_sleep() is called; we could use any variable name we + # wanted for the return value, but since 'return_value' is already a + # standard term in mock, and since setting side_effect causes return_value + # to be ignored, it's safe to just reuse the name here. 
+ m_time = Mock() + m_time.return_value = time.time() + + def m_time_side_effect(): + # Fake the slow passage of time + m_time.return_value += 0.1 + return m_time.return_value + m_time.side_effect = m_time_side_effect + + def f_sleep(seconds): + m_time.return_value += seconds + m_sleep = Mock(wraps=f_sleep) + return m_time, m_sleep + + +def setup_module(): + global m_time + global m_sleep + m_time, m_sleep = get_fake_time_and_sleep() + global patcher_time_sleep + patcher_time_sleep = patch.multiple( + 'teuthology.suite.time', + time=m_time, + sleep=m_sleep, + ) + patcher_time_sleep.start() + + +def teardown_module(): + patcher_time_sleep.stop() + + +@patch.object(suite.ResultsReporter, 'get_jobs') +def test_wait_success(m_get_jobs, caplog): + results = [ + [{'status': 'queued', 'job_id': '2'}], + [], + ] + final = [ + {'status': 'pass', 'job_id': '1', + 'description': 'DESC1', 'log_href': 'http://URL1'}, + {'status': 'fail', 'job_id': '2', + 'description': 'DESC2', 'log_href': 'http://URL2'}, + {'status': 'pass', 'job_id': '3', + 'description': 'DESC3', 'log_href': 'http://URL3'}, + ] + + def get_jobs(name, **kwargs): + if kwargs['fields'] == ['job_id', 'status']: + return in_progress.pop(0) + else: + return final + m_get_jobs.side_effect = get_jobs + suite.Run.WAIT_PAUSE = 1 + + in_progress = deepcopy(results) + assert 0 == suite.wait('name', 1, 'http://UPLOAD_URL') + assert m_get_jobs.called_with('name', fields=['job_id', 'status']) + assert 0 == len(in_progress) + assert 'fail http://UPLOAD_URL/name/2' in caplog.text + + in_progress = deepcopy(results) + assert 0 == suite.wait('name', 1, None) + assert m_get_jobs.called_with('name', fields=['job_id', 'status']) + assert 0 == len(in_progress) + assert 'fail http://URL2' in caplog.text + + +@patch.object(suite.ResultsReporter, 'get_jobs') +def test_wait_fails(m_get_jobs): + results = [] + results.append([{'status': 'queued', 'job_id': '2'}]) + results.append([{'status': 'queued', 'job_id': '2'}]) + results.append([{'status': 'queued', 'job_id': '2'}]) + + def get_jobs(name, **kwargs): + return results.pop(0) + m_get_jobs.side_effect = get_jobs + suite.Run.WAIT_PAUSE = 1 + suite.Run.WAIT_MAX_JOB_TIME = 1 + with pytest.raises(suite.WaitException): + suite.wait('name', 1, None) + + +REPO_SHORTHAND = [ + ['https://github.com/dude/foo', 'bar', + 'https://github.com/dude/bar.git'], + ['https://github.com/dude/foo/', 'bar', + 'https://github.com/dude/bar.git'], + ['https://github.com/ceph/ceph', 'ceph', + 'https://github.com/ceph/ceph.git'], + ['https://github.com/ceph/ceph/', 'ceph', + 'https://github.com/ceph/ceph.git'], + ['https://github.com/ceph/ceph.git', 'ceph', + 'https://github.com/ceph/ceph.git'], + ['https://github.com/ceph/ceph', 'ceph-ci', + 'https://github.com/ceph/ceph-ci.git'], + ['https://github.com/ceph/ceph-ci', 'ceph', + 'https://github.com/ceph/ceph.git'], + ['git://git.ceph.com/ceph.git', 'ceph', + 'git://git.ceph.com/ceph.git'], + ['git://git.ceph.com/ceph.git', 'ceph-ci', + 'git://git.ceph.com/ceph-ci.git'], + ['git://git.ceph.com/ceph-ci.git', 'ceph', + 'git://git.ceph.com/ceph.git'], + ['https://github.com/ceph/ceph.git', 'ceph/ceph-ci', + 'https://github.com/ceph/ceph-ci.git'], + ['https://github.com/ceph/ceph.git', 'https://github.com/ceph/ceph-ci', + 'https://github.com/ceph/ceph-ci'], + ['https://github.com/ceph/ceph.git', 'https://github.com/ceph/ceph-ci/', + 'https://github.com/ceph/ceph-ci/'], + ['https://github.com/ceph/ceph.git', 'https://github.com/ceph/ceph-ci.git', + 'https://github.com/ceph/ceph-ci.git'], 
+] + + +@pytest.mark.parametrize(['orig', 'shorthand', 'result'], REPO_SHORTHAND) +def test_expand_short_repo_name(orig, shorthand, result): + assert suite.expand_short_repo_name(shorthand, orig) == result + + +class TestSuiteMain(object): + def test_main(self): + suite_name = 'SUITE' + throttle = '3' + machine_type = 'burnupi' + + def prepare_and_schedule(obj): + assert obj.base_config.suite == suite_name + assert obj.args.throttle == throttle + + def fake_str(*args, **kwargs): + return 'fake' + + def fake_bool(*args, **kwargs): + return True + + def fake_false(*args, **kwargs): + return False + + with patch.multiple( + 'teuthology.suite.run.util', + fetch_repos=DEFAULT, + package_version_for_hash=fake_str, + git_branch_exists=fake_bool, + git_ls_remote=fake_str, + ): + with patch.multiple( + 'teuthology.suite.run.Run', + prepare_and_schedule=prepare_and_schedule, + ), patch.multiple( + 'teuthology.suite.run.os.path', + exists=fake_false, + ): + main([ + '--ceph', 'main', + '--suite', suite_name, + '--throttle', throttle, + '--machine-type', machine_type, + ]) + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_machine_type_multi_error(self, m_smtp): + config.results_email = "example@example.com" + with pytest.raises(ScheduleFailError) as exc: + main([ + '--ceph', 'main', + '--suite', 'suite_name', + '--throttle', '3', + '--machine-type', 'multi', + '--dry-run', + ]) + assert str(exc.value) == "Scheduling failed: 'multi' is not a valid machine_type. \ +Maybe you want 'gibba,smithi,mira' or similar" + m_smtp.assert_not_called() + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_machine_type_none_error(self, m_smtp): + config.result_email = 'example@example.com' + with pytest.raises(ScheduleFailError) as exc: + main([ + '--ceph', 'main', + '--suite', 'suite_name', + '--throttle', '3', + '--machine-type', 'None', + '--dry-run', + ]) + assert str(exc.value) == "Scheduling failed: Must specify a machine_type" + m_smtp.assert_not_called() + + def test_schedule_suite_noverify(self): + suite_name = 'noop' + suite_dir = os.path.dirname(__file__) + throttle = '3' + machine_type = 'burnupi' + + with patch.multiple( + 'teuthology.suite.util', + fetch_repos=DEFAULT, + teuthology_schedule=DEFAULT, + get_arch=lambda x: 'x86_64', + get_gitbuilder_hash=DEFAULT, + git_ls_remote=lambda *args: '1234', + package_version_for_hash=DEFAULT, + ) as m: + m['package_version_for_hash'].return_value = 'fake-9.5' + config.suite_verify_ceph_hash = False + main([ + '--ceph', 'main', + '--suite', suite_name, + '--suite-dir', suite_dir, + '--suite-relpath', '', + '--throttle', throttle, + '--machine-type', machine_type + ]) + m_sleep.assert_called_with(int(throttle)) + m['get_gitbuilder_hash'].assert_not_called() + + def test_schedule_suite(self): + suite_name = 'noop' + suite_dir = os.path.dirname(__file__) + throttle = '3' + machine_type = 'burnupi' + + with patch.multiple( + 'teuthology.suite.util', + fetch_repos=DEFAULT, + teuthology_schedule=DEFAULT, + get_arch=lambda x: 'x86_64', + get_gitbuilder_hash=DEFAULT, + git_ls_remote=lambda *args: '12345', + package_version_for_hash=DEFAULT, + ) as m: + m['package_version_for_hash'].return_value = 'fake-9.5' + config.suite_verify_ceph_hash = True + main([ + '--ceph', 'main', + '--suite', suite_name, + '--suite-dir', suite_dir, + '--suite-relpath', '', + '--throttle', throttle, + '--machine-type', machine_type + ]) + m_sleep.assert_called_with(int(throttle)) diff --git a/teuthology/suite/test/test_matrix.py b/teuthology/suite/test/test_matrix.py new file 
mode 100644 index 0000000000..596bb37a74 --- /dev/null +++ b/teuthology/suite/test/test_matrix.py @@ -0,0 +1,82 @@ +from teuthology.suite import matrix + + +def verify_matrix_output_diversity(res): + """ + Verifies that the size of the matrix passed matches the number of unique + outputs from res.index + """ + sz = res.size() + s = frozenset([matrix.generate_lists(res.index(i)) for i in range(sz)]) + for i in range(res.size()): + assert sz == len(s) + + +def mbs(num, l): + return matrix.Sum(num*10, [matrix.Base(i + (100*num)) for i in l]) + + +class TestMatrix(object): + def test_simple(self): + verify_matrix_output_diversity(mbs(1, range(6))) + + def test_simple2(self): + verify_matrix_output_diversity(mbs(1, range(5))) + + # The test_product* tests differ by the degree by which dimension + # sizes share prime factors + def test_product_simple(self): + verify_matrix_output_diversity( + matrix.Product(1, [mbs(1, range(6)), mbs(2, range(2))])) + + def test_product_3_facets_2_prime_factors(self): + verify_matrix_output_diversity(matrix.Product(1, [ + mbs(1, range(6)), + mbs(2, range(2)), + mbs(3, range(3)), + ])) + + def test_product_3_facets_2_prime_factors_one_larger(self): + verify_matrix_output_diversity(matrix.Product(1, [ + mbs(1, range(2)), + mbs(2, range(5)), + mbs(4, range(4)), + ])) + + def test_product_4_facets_2_prime_factors(self): + verify_matrix_output_diversity(matrix.Sum(1, [ + mbs(1, range(6)), + mbs(3, range(3)), + mbs(2, range(2)), + mbs(4, range(9)), + ])) + + def test_product_2_facets_2_prime_factors(self): + verify_matrix_output_diversity(matrix.Sum(1, [ + mbs(1, range(2)), + mbs(2, range(5)), + ])) + + def test_product_with_sum(self): + verify_matrix_output_diversity(matrix.Sum( + 9, + [ + mbs(10, range(6)), + matrix.Product(1, [ + mbs(1, range(2)), + mbs(2, range(5)), + mbs(4, range(4))]), + matrix.Product(8, [ + mbs(7, range(2)), + mbs(6, range(5)), + mbs(5, range(4))]) + ] + )) + + def test_product_with_pick_random(self): + verify_matrix_output_diversity(matrix.PickRandom(1, [ + mbs(1, range(6)), + mbs(3, range(3)), + mbs(2, range(2)), + mbs(4, range(9)), + ])) diff --git a/teuthology/suite/test/test_merge.py b/teuthology/suite/test/test_merge.py new file mode 100644 index 0000000000..2a4c3bfa25 --- /dev/null +++ b/teuthology/suite/test/test_merge.py @@ -0,0 +1,233 @@ +import logging +from textwrap import dedent + +from mock import patch, MagicMock +from unittest import TestCase + +from teuthology.suite import build_matrix +from teuthology.suite.merge import config_merge +from teuthology.test.fake_fs import make_fake_fstools + +log = logging.getLogger(__name__) + +class TestMerge(TestCase): + patchpoints = [ + 'os.path.exists', + 'os.listdir', + 'os.path.isfile', + 'os.path.isdir', + 'builtins.open', + ] + + def setUp(self): + log.debug("setUp") + self.mocks = dict() + self.patchers = dict() + for ppoint in self.__class__.patchpoints: + self.mocks[ppoint] = MagicMock() + self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint]) + + def start_patchers(self, fake_fs): + fake_fns = make_fake_fstools(fake_fs) + # N.B.: relies on fake_fns being in same order as patchpoints + for ppoint, fn in zip(self.__class__.patchpoints, fake_fns): + self.mocks[ppoint].side_effect = fn + self.patchers[ppoint].start() + + def stop_patchers(self): + for patcher in self.patchers.values(): + patcher.stop() + + def tearDown(self): + log.debug("tearDown") + self.patchers.clear() + self.mocks.clear() + + def test_premerge(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'a.yaml': 
dedent(""" + teuthology: + premerge: reject() + foo: bar + """), + }, + 'c.yaml': dedent(""" + top: pot + """), + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + self.assertEqual(len(result), 1) + configs = list(config_merge(result)) + self.assertEqual(len(configs), 1) + desc, frags, yaml = configs[0] + self.assertIn("top", yaml) + self.assertNotIn("foo", yaml) + finally: + self.stop_patchers() + + def test_postmerge(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'a.yaml': dedent(""" + teuthology: + postmerge: + - reject() + foo: bar + """), + 'b.yaml': dedent(""" + baz: zab + """), + }, + 'c.yaml': dedent(""" + top: pot + """), + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + self.assertEqual(len(result), 2) + configs = list(config_merge(result)) + self.assertEqual(len(configs), 1) + desc, frags, yaml = configs[0] + self.assertIn("top", yaml) + self.assertIn("baz", yaml) + self.assertNotIn("foo", yaml) + finally: + self.stop_patchers() + + def test_postmerge_concat(self): + fake_fs = { + 'd0_0': { + '%': None, + 'd1_0': { + 'a.yaml': dedent(""" + teuthology: + postmerge: + - local a = 1 + foo: bar + """), + 'b.yaml': dedent(""" + teuthology: + postmerge: + - local a = 2 + baz: zab + """), + }, + 'z.yaml': dedent(""" + teuthology: + postmerge: + - if a == 1 then reject() end + top: pot + """), + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + self.assertEqual(len(result), 2) + configs = list(config_merge(result)) + self.assertEqual(len(configs), 1) + desc, frags, yaml = configs[0] + self.assertIn("top", yaml) + self.assertIn("baz", yaml) + self.assertNotIn("foo", yaml) + finally: + self.stop_patchers() + + + def test_yaml_mutation(self): + fake_fs = { + 'd0_0': { + '%': None, + 'c.yaml': dedent(""" + teuthology: + postmerge: + - | + yaml["test"] = py_dict() + top: pot + """), + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + self.assertEqual(len(result), 1) + configs = list(config_merge(result)) + self.assertEqual(len(configs), 1) + desc, frags, yaml = configs[0] + self.assertIn("test", yaml) + self.assertDictEqual(yaml["test"], {}) + finally: + self.stop_patchers() + + def test_sandbox(self): + fake_fs = { + 'd0_0': { + '%': None, + 'c.yaml': dedent(""" + teuthology: + postmerge: + - | + log.debug("_ENV contains:") + for k,v in pairs(_ENV) do + log.debug("_ENV['%s'] = %s", tostring(k), tostring(v)) + end + local check = { + "assert", + "error", + "ipairs", + "next", + "pairs", + "tonumber", + "tostring", + "py_attrgetter", + "py_dict", + "py_list", + "py_tuple", + "py_enumerate", + "py_iterex", + "py_itemgetter", + "math", + "reject", + "accept", + "deep_merge", + "log", + "reject", + "yaml_load", + } + for _,v in ipairs(check) do + log.debug("checking %s", tostring(v)) + assert(_ENV[v]) + end + local block = { + "coroutine", + "debug", + "io", + "os", + "package", + } + for _,v in ipairs(block) do + log.debug("checking %s", tostring(v)) + assert(_ENV[v] == nil) + end + top: pot + """), + }, + } + self.start_patchers(fake_fs) + try: + result = build_matrix.build_matrix('d0_0') + self.assertEqual(len(result), 1) + configs = list(config_merge(result)) + self.assertEqual(len(configs), 1) + finally: + self.stop_patchers() diff --git a/teuthology/suite/test/test_placeholder.py b/teuthology/suite/test/test_placeholder.py new file mode 100644 index 0000000000..acf1b6a44f --- /dev/null +++ 
b/teuthology/suite/test/test_placeholder.py @@ -0,0 +1,55 @@ +from teuthology.suite.placeholder import ( + substitute_placeholders, dict_templ, Placeholder +) + + +class TestPlaceholder(object): + def test_substitute_placeholders(self): + suite_hash = 'suite_hash' + input_dict = dict( + suite='suite', + suite_branch='suite_branch', + suite_hash=suite_hash, + ceph_branch='ceph_branch', + ceph_hash='ceph_hash', + teuthology_branch='teuthology_branch', + teuthology_sha1='teuthology_sha1', + machine_type='machine_type', + distro='distro', + distro_version='distro_version', + archive_upload='archive_upload', + archive_upload_key='archive_upload_key', + suite_repo='https://example.com/ceph/suite.git', + suite_relpath='', + ceph_repo='https://example.com/ceph/ceph.git', + flavor='default' + ) + output_dict = substitute_placeholders(dict_templ, input_dict) + assert output_dict['suite'] == 'suite' + assert output_dict['suite_sha1'] == suite_hash + assert isinstance(dict_templ['suite'], Placeholder) + assert isinstance( + dict_templ['overrides']['admin_socket']['branch'], + Placeholder) + + def test_null_placeholders_dropped(self): + input_dict = dict( + suite='suite', + suite_branch='suite_branch', + suite_hash='suite_hash', + ceph_branch='ceph_branch', + ceph_hash='ceph_hash', + teuthology_branch='teuthology_branch', + teuthology_sha1='teuthology_sha1', + machine_type='machine_type', + archive_upload='archive_upload', + archive_upload_key='archive_upload_key', + distro=None, + distro_version=None, + suite_repo='https://example.com/ceph/suite.git', + suite_relpath='', + ceph_repo='https://example.com/ceph/ceph.git', + flavor=None, + ) + output_dict = substitute_placeholders(dict_templ, input_dict) + assert 'os_type' not in output_dict diff --git a/teuthology/suite/test/test_run_.py b/teuthology/suite/test/test_run_.py new file mode 100644 index 0000000000..278e1358b1 --- /dev/null +++ b/teuthology/suite/test/test_run_.py @@ -0,0 +1,425 @@ +import os +import pytest +import requests +import contextlib +import yaml + +from datetime import datetime +from mock import patch, call, ANY, DEFAULT +from teuthology.util.compat import PY3 +if PY3: + from io import StringIO + from io import BytesIO +else: + from io import BytesIO as StringIO + from io import BytesIO + +from teuthology.config import config, YamlConfig +from teuthology.exceptions import ScheduleFailError +from teuthology.suite import run +from teuthology import packaging + + +class TestRun(object): + klass = run.Run + + def setup(self): + self.args_dict = dict( + suite='suite', + suite_branch='suite_branch', + suite_relpath='', + ceph_branch='ceph_branch', + ceph_sha1='ceph_sha1', + email='address@example.com', + teuthology_branch='teuthology_branch', + kernel_branch=None, + flavor='flavor', + distro='ubuntu', + machine_type='machine_type', + base_yaml_paths=list(), + ) + self.args = YamlConfig.from_dict(self.args_dict) + + @patch('teuthology.suite.run.util.fetch_repos') + @patch('teuthology.suite.run.util.git_ls_remote') + @patch('teuthology.suite.run.Run.choose_ceph_version') + @patch('teuthology.suite.run.util.git_validate_sha1') + def test_email_addr(self, m_git_validate_sha1, m_choose_ceph_version, + m_git_ls_remote, m_fetch_repos): + # neuter choose_X_branch + m_git_validate_sha1.return_value = self.args_dict['ceph_sha1'] + m_choose_ceph_version.return_value = self.args_dict['ceph_sha1'] + self.args_dict['teuthology_branch'] = 'main' + self.args_dict['suite_branch'] = 'main' + m_git_ls_remote.return_value = 'suite_sha1' + + runobj = 
self.klass(self.args) + assert runobj.base_config.email == self.args_dict['email'] + + @patch('teuthology.suite.run.util.fetch_repos') + def test_name(self, m_fetch_repos): + stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + with patch.object(run.Run, 'create_initial_config', + return_value=run.JobConfig()): + name = run.Run(self.args).name + assert str(stamp) in name + + @patch('teuthology.suite.run.util.fetch_repos') + def test_name_user(self, m_fetch_repos): + self.args.user = 'USER' + with patch.object(run.Run, 'create_initial_config', + return_value=run.JobConfig()): + name = run.Run(self.args).name + assert name.startswith('USER-') + + @patch('teuthology.suite.run.util.git_branch_exists') + @patch('teuthology.suite.run.util.package_version_for_hash') + @patch('teuthology.suite.run.util.git_ls_remote') + def test_branch_nonexistent( + self, + m_git_ls_remote, + m_package_version_for_hash, + m_git_branch_exists, + ): + config.gitbuilder_host = 'example.com' + m_git_ls_remote.side_effect = [ + # First call will be for the ceph hash + None, + # Second call will be for the suite hash + 'suite_hash', + ] + m_package_version_for_hash.return_value = 'a_version' + m_git_branch_exists.return_value = True + self.args.ceph_branch = 'ceph_sha1' + self.args.ceph_sha1 = None + with pytest.raises(ScheduleFailError): + self.klass(self.args) + + @patch('teuthology.suite.run.util.fetch_repos') + @patch('requests.head') + @patch('teuthology.suite.run.util.git_branch_exists') + @patch('teuthology.suite.run.util.package_version_for_hash') + @patch('teuthology.suite.run.util.git_ls_remote') + def test_sha1_exists( + self, + m_git_ls_remote, + m_package_version_for_hash, + m_git_branch_exists, + m_requests_head, + m_fetch_repos, + ): + config.gitbuilder_host = 'example.com' + m_package_version_for_hash.return_value = 'ceph_hash' + m_git_branch_exists.return_value = True + resp = requests.Response() + resp.reason = 'OK' + resp.status_code = 200 + m_requests_head.return_value = resp + # only one call to git_ls_remote in this case + m_git_ls_remote.return_value = "suite_branch" + run = self.klass(self.args) + assert run.base_config.sha1 == 'ceph_sha1' + assert run.base_config.branch == 'ceph_branch' + + @patch('requests.head') + @patch('teuthology.suite.util.git_branch_exists') + @patch('teuthology.suite.util.package_version_for_hash') + def test_sha1_nonexistent( + self, + m_package_version_for_hash, + m_git_branch_exists, + m_requests_head, + ): + config.gitbuilder_host = 'example.com' + m_package_version_for_hash.return_value = 'ceph_hash' + m_git_branch_exists.return_value = True + resp = requests.Response() + resp.reason = 'Not Found' + resp.status_code = 404 + m_requests_head.return_value = resp + self.args.ceph_sha1 = 'ceph_hash_dne' + with pytest.raises(ScheduleFailError): + self.klass(self.args) + + @patch('teuthology.suite.util.smtplib.SMTP') + @patch('teuthology.suite.util.git_ls_remote') + def test_teuthology_branch_nonexistent( + self, + m_git_ls_remote, + m_smtp, + ): + m_git_ls_remote.return_value = None + config.teuthology_path = None + config.results_email = "example@example.com" + self.args.dry_run = True + self.args.teuthology_branch = 'no_branch' + with pytest.raises(ScheduleFailError): + self.klass(self.args) + m_smtp.assert_not_called() + + @patch('teuthology.suite.run.util.fetch_repos') + @patch('teuthology.suite.run.util.git_branch_exists') + @patch('teuthology.suite.run.util.package_version_for_hash') + @patch('teuthology.suite.run.util.git_ls_remote') + 
@patch('teuthology.suite.run.os.path.exists') + def test_regression( + self, + m_qa_teuthology_branch_exists, + m_git_ls_remote, + m_package_version_for_hash, + m_git_branch_exists, + m_fetch_repos, + ): + config.use_shaman = False + config.gitbuilder_host = 'example.com' + m_package_version_for_hash.return_value = 'ceph_hash' + m_git_branch_exists.return_value = True + m_git_ls_remote.return_value = "suite_branch" + m_qa_teuthology_branch_exists.return_value = False + self.args_dict = { + 'base_yaml_paths': [], + 'ceph_branch': 'main', + 'machine_type': 'smithi', + 'flavor': 'default', + 'kernel_branch': 'testing', + 'suite': 'krbd', + } + self.args = YamlConfig.from_dict(self.args_dict) + with patch.multiple( + 'teuthology.packaging.GitbuilderProject', + _get_package_sha1=DEFAULT, + ) as m: + assert m != dict() + m['_get_package_sha1'].return_value = 'SHA1' + conf = dict( + os_type='ubuntu', + os_version='16.04', + ) + assert packaging.GitbuilderProject('ceph', conf).sha1 == 'SHA1' + run_ = self.klass(self.args) + assert run_.base_config['kernel']['sha1'] == 'SHA1' + + +class TestScheduleSuite(object): + klass = run.Run + + def setup(self): + self.args_dict = dict( + suite='suite', + suite_relpath='', + suite_dir='suite_dir', + suite_branch='main', + ceph_branch='ceph_branch', + ceph_sha1='ceph_sha1', + teuthology_branch='main', + kernel_branch=None, + flavor='flavor', + distro='ubuntu', + distro_version='14.04', + machine_type='machine_type', + base_yaml_paths=list(), + ) + self.args = YamlConfig.from_dict(self.args_dict) + + @patch('teuthology.suite.run.Run.schedule_jobs') + @patch('teuthology.suite.run.Run.write_rerun_memo') + @patch('teuthology.suite.util.has_packages_for_distro') + @patch('teuthology.suite.util.get_package_versions') + @patch('teuthology.suite.util.get_install_task_flavor') + @patch('teuthology.suite.merge.open') + @patch('teuthology.suite.run.build_matrix') + @patch('teuthology.suite.util.git_ls_remote') + @patch('teuthology.suite.util.package_version_for_hash') + @patch('teuthology.suite.util.git_validate_sha1') + @patch('teuthology.suite.util.get_arch') + def test_successful_schedule( + self, + m_get_arch, + m_git_validate_sha1, + m_package_version_for_hash, + m_git_ls_remote, + m_build_matrix, + m_open, + m_get_install_task_flavor, + m_get_package_versions, + m_has_packages_for_distro, + m_write_rerun_memo, + m_schedule_jobs, + ): + m_get_arch.return_value = 'x86_64' + m_git_validate_sha1.return_value = self.args.ceph_sha1 + m_package_version_for_hash.return_value = 'ceph_version' + m_git_ls_remote.return_value = 'suite_hash' + build_matrix_desc = 'desc' + build_matrix_frags = ['frag1.yml', 'frag2.yml'] + build_matrix_output = [ + (build_matrix_desc, build_matrix_frags), + ] + m_build_matrix.return_value = build_matrix_output + frag1_read_output = 'field1: val1' + frag2_read_output = 'field2: val2' + m_open.side_effect = [ + StringIO(frag1_read_output), + StringIO(frag2_read_output), + contextlib.closing(BytesIO()) + ] + m_get_install_task_flavor.return_value = 'default' + m_get_package_versions.return_value = dict() + m_has_packages_for_distro.return_value = True + # schedule_jobs() is just neutered; check calls below + + self.args.newest = 0 + self.args.num = 42 + runobj = self.klass(self.args) + runobj.base_args = list() + count = runobj.schedule_suite() + assert(count == 1) + assert runobj.base_config['suite_sha1'] == 'suite_hash' + m_has_packages_for_distro.assert_has_calls( + [call('ceph_sha1', 'ubuntu', '14.04', 'default', {})], + ) + y = { + 
'teuthology': { + 'fragments_dropped': [], + 'meta': {}, + 'postmerge': [] + }, + 'field1': 'val1', + 'field2': 'val2' + } + expected_job = dict( + yaml=y, + sha1='ceph_sha1', + args=[ + '--num', + '42', + '--description', + os.path.join(self.args.suite, build_matrix_desc), + '--', + ANY, # base config + '-' + ], + stdin=yaml.dump(y), + desc=os.path.join(self.args.suite, build_matrix_desc), + ) + + m_schedule_jobs.assert_has_calls( + [call([], [expected_job], runobj.name)], + ) + m_write_rerun_memo.assert_called_once_with() + + @patch('teuthology.suite.util.find_git_parent') + @patch('teuthology.suite.run.Run.schedule_jobs') + @patch('teuthology.suite.util.has_packages_for_distro') + @patch('teuthology.suite.util.get_package_versions') + @patch('teuthology.suite.util.get_install_task_flavor') + @patch('teuthology.suite.run.config_merge') + @patch('teuthology.suite.run.build_matrix') + @patch('teuthology.suite.util.git_ls_remote') + @patch('teuthology.suite.util.package_version_for_hash') + @patch('teuthology.suite.util.git_validate_sha1') + @patch('teuthology.suite.util.get_arch') + def test_newest_failure( + self, + m_get_arch, + m_git_validate_sha1, + m_package_version_for_hash, + m_git_ls_remote, + m_build_matrix, + m_config_merge, + m_get_install_task_flavor, + m_get_package_versions, + m_has_packages_for_distro, + m_schedule_jobs, + m_find_git_parent, + ): + m_get_arch.return_value = 'x86_64' + m_git_validate_sha1.return_value = self.args.ceph_sha1 + m_package_version_for_hash.return_value = 'ceph_version' + m_git_ls_remote.return_value = 'suite_hash' + build_matrix_desc = 'desc' + build_matrix_frags = ['frag.yml'] + build_matrix_output = [ + (build_matrix_desc, build_matrix_frags), + ] + m_build_matrix.return_value = build_matrix_output + m_config_merge.return_value = [(a, b, {}) for a, b in build_matrix_output] + m_get_install_task_flavor.return_value = 'default' + m_get_package_versions.return_value = dict() + m_has_packages_for_distro.side_effect = [ + False for i in range(11) + ] + + m_find_git_parent.side_effect = lambda proj, sha1: sha1 + '^' + + self.args.newest = 10 + runobj = self.klass(self.args) + runobj.base_args = list() + with pytest.raises(ScheduleFailError) as exc: + runobj.schedule_suite() + assert 'Exceeded 10 backtracks' in str(exc.value) + m_find_git_parent.assert_has_calls( + [call('ceph', 'ceph_sha1' + i * '^') for i in range(10)] + ) + + @patch('teuthology.suite.util.find_git_parent') + @patch('teuthology.suite.run.Run.schedule_jobs') + @patch('teuthology.suite.run.Run.write_rerun_memo') + @patch('teuthology.suite.util.has_packages_for_distro') + @patch('teuthology.suite.util.get_package_versions') + @patch('teuthology.suite.util.get_install_task_flavor') + @patch('teuthology.suite.run.config_merge') + @patch('teuthology.suite.run.build_matrix') + @patch('teuthology.suite.util.git_ls_remote') + @patch('teuthology.suite.util.package_version_for_hash') + @patch('teuthology.suite.util.git_validate_sha1') + @patch('teuthology.suite.util.get_arch') + def test_newest_success( + self, + m_get_arch, + m_git_validate_sha1, + m_package_version_for_hash, + m_git_ls_remote, + m_build_matrix, + m_config_merge, + m_get_install_task_flavor, + m_get_package_versions, + m_has_packages_for_distro, + m_write_rerun_memo, + m_schedule_jobs, + m_find_git_parent, + ): + m_get_arch.return_value = 'x86_64' + # rig has_packages_for_distro to fail this many times, so + # everything will run NUM_FAILS+1 times + NUM_FAILS = 5 + m_git_validate_sha1.return_value = self.args.ceph_sha1 + 
m_package_version_for_hash.return_value = 'ceph_version' + m_git_ls_remote.return_value = 'suite_hash' + build_matrix_desc = 'desc' + build_matrix_frags = ['frag.yml'] + build_matrix_output = [ + (build_matrix_desc, build_matrix_frags), + ] + m_build_matrix.return_value = build_matrix_output + m_config_merge.return_value = [(a, b, {}) for a, b in build_matrix_output] + m_get_install_task_flavor.return_value = 'default' + m_get_package_versions.return_value = dict() + # NUM_FAILS, then success + m_has_packages_for_distro.side_effect = \ + [False for i in range(NUM_FAILS)] + [True] + + m_find_git_parent.side_effect = lambda proj, sha1: sha1 + '^' + + self.args.newest = 10 + runobj = self.klass(self.args) + runobj.base_args = list() + count = runobj.schedule_suite() + assert count == 1 + m_has_packages_for_distro.assert_has_calls( + [call('ceph_sha1' + '^' * i, 'ubuntu', '14.04', 'default', {}) + for i in range(NUM_FAILS+1)] + ) + m_find_git_parent.assert_has_calls( + [call('ceph', 'ceph_sha1' + i * '^') for i in range(NUM_FAILS)] + ) diff --git a/teuthology/suite/test/test_util.py b/teuthology/suite/test/test_util.py new file mode 100644 index 0000000000..fdd352c450 --- /dev/null +++ b/teuthology/suite/test/test_util.py @@ -0,0 +1,374 @@ +import os +import pytest +import tempfile + +from copy import deepcopy +from mock import Mock, patch + +from teuthology.config import config +from teuthology.orchestra.opsys import OS +from teuthology.suite import util +from teuthology.exceptions import ScheduleFailError + + +REPO_PROJECTS_AND_URLS = [ + 'ceph', + 'https://github.com/not_ceph/ceph.git', +] + + +@pytest.mark.parametrize('project_or_url', REPO_PROJECTS_AND_URLS) +@patch('subprocess.check_output') +def test_git_branch_exists(m_check_output, project_or_url): + m_check_output.return_value = '' + assert False == util.git_branch_exists( + project_or_url, 'nobranchnowaycanthappen') + m_check_output.return_value = b'HHH branch' + assert True == util.git_branch_exists(project_or_url, 'main') + + +@pytest.fixture +def git_repository(request): + d = tempfile.mkdtemp() + os.system(""" + cd {d} + git init + touch A + git config user.email 'you@example.com' + git config user.name 'Your Name' + git add A + git commit -m 'A' A + git rev-parse --abbrev-ref main || git checkout -b main + """.format(d=d)) + + def fin(): + os.system("rm -fr " + d) + request.addfinalizer(fin) + return d + + +class TestUtil(object): + def setup(self): + config.use_shaman = False + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_schedule_fail(self, m_smtp): + config.results_email = "example@example.com" + with pytest.raises(ScheduleFailError) as exc: + util.schedule_fail(message="error msg", dry_run=False) + assert str(exc.value) == "Scheduling failed: error msg" + m_smtp.assert_called() + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_schedule_fail_dryrun(self, m_smtp): + config.results_email = "example@example.com" + with pytest.raises(ScheduleFailError) as exc: + util.schedule_fail(message="error msg", dry_run=True) + assert str(exc.value) == "Scheduling failed: error msg" + m_smtp.assert_not_called() + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_fetch_repo_no_branch(self, m_smtp): + config.results_email = "example@example.com" + with pytest.raises(ScheduleFailError) as exc: + util.fetch_repos("no-branch", "test1", dry_run=False) + assert str(exc.value) == "Scheduling test1 failed: \ +Branch 'no-branch' not found in repo: https://github.com/ceph/ceph-ci.git!" 
+ m_smtp.assert_called() + + @patch('teuthology.suite.util.smtplib.SMTP') + def test_fetch_repo_no_branch_dryrun(self, m_smtp): + config.results_email = "example@example.com" + with pytest.raises(ScheduleFailError) as exc: + util.fetch_repos("no-branch", "test1", dry_run=True) + assert str(exc.value) == "Scheduling test1 failed: \ +Branch 'no-branch' not found in repo: https://github.com/ceph/ceph-ci.git!" + m_smtp.assert_not_called() + + @patch('requests.get') + def test_get_hash_success(self, m_get): + mock_resp = Mock() + mock_resp.ok = True + mock_resp.text = "the_hash" + m_get.return_value = mock_resp + result = util.get_gitbuilder_hash() + assert result == "the_hash" + + @patch('requests.get') + def test_get_hash_fail(self, m_get): + mock_resp = Mock() + mock_resp.ok = False + m_get.return_value = mock_resp + result = util.get_gitbuilder_hash() + assert result is None + + @patch('requests.get') + def test_package_version_for_hash(self, m_get): + mock_resp = Mock() + mock_resp.ok = True + mock_resp.text = "the_version" + m_get.return_value = mock_resp + result = util.package_version_for_hash("hash") + assert result == "the_version" + + @patch('requests.get') + def test_get_branch_info(self, m_get): + mock_resp = Mock() + mock_resp.ok = True + mock_resp.json.return_value = "some json" + m_get.return_value = mock_resp + result = util.get_branch_info("teuthology", "main") + m_get.assert_called_with( + "https://api.github.com/repos/ceph/teuthology/git/refs/heads/main" + ) + assert result == "some json" + + @patch('teuthology.lock.query') + def test_get_arch_fail(self, m_query): + m_query.list_locks.return_value = False + util.get_arch('magna') + m_query.list_locks.assert_called_with(machine_type="magna", count=1) + + @patch('teuthology.lock.query') + def test_get_arch_success(self, m_query): + m_query.list_locks.return_value = [{"arch": "arch"}] + result = util.get_arch('magna') + m_query.list_locks.assert_called_with( + machine_type="magna", + count=1 + ) + assert result == "arch" + + def test_build_git_url_github(self): + assert 'project' in util.build_git_url('project') + owner = 'OWNER' + git_url = util.build_git_url('project', project_owner=owner) + assert owner in git_url + + @patch('teuthology.config.TeuthologyConfig.get_ceph_qa_suite_git_url') + def test_build_git_url_ceph_qa_suite_custom( + self, + m_get_ceph_qa_suite_git_url): + url = 'http://foo.com/some' + m_get_ceph_qa_suite_git_url.return_value = url + '.git' + assert url == util.build_git_url('ceph-qa-suite') + + @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url') + def test_build_git_url_ceph_custom(self, m_get_ceph_git_url): + url = 'http://foo.com/some' + m_get_ceph_git_url.return_value = url + '.git' + assert url == util.build_git_url('ceph') + + @patch('teuthology.config.TeuthologyConfig.get_ceph_cm_ansible_git_url') + def test_build_git_url_ceph_cm_ansible_custom(self, m_get_ceph_cm_ansible_git_url): + url = 'http://foo.com/some' + m_get_ceph_cm_ansible_git_url.return_value = url + '.git' + assert url == util.build_git_url('ceph-cm-ansible') + + @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url') + def test_git_ls_remote(self, m_get_ceph_git_url, git_repository): + m_get_ceph_git_url.return_value = git_repository + assert util.git_ls_remote('ceph', 'nobranch') is None + assert util.git_ls_remote('ceph', 'main') is not None + + @patch('teuthology.suite.util.requests.get') + def test_find_git_parent(self, m_requests_get): + refresh_resp = Mock(ok=True) + history_resp = Mock(ok=True) + 
history_resp.json.return_value = {'sha1s': ['sha1', 'sha1_p']} + m_requests_get.side_effect = [refresh_resp, history_resp] + parent_sha1 = util.find_git_parent('ceph', 'sha1') + assert len(m_requests_get.mock_calls) == 2 + assert parent_sha1 == 'sha1_p' + + +class TestFlavor(object): + + def test_get_install_task_flavor_bare(self): + config = dict( + tasks=[ + dict( + install=dict(), + ), + ], + ) + assert util.get_install_task_flavor(config) == 'default' + + def test_get_install_task_flavor_simple(self): + config = dict( + tasks=[ + dict( + install=dict( + flavor='notcmalloc', + ), + ), + ], + ) + assert util.get_install_task_flavor(config) == 'notcmalloc' + + def test_get_install_task_flavor_override_simple(self): + config = dict( + tasks=[ + dict(install=dict()), + ], + overrides=dict( + install=dict( + flavor='notcmalloc', + ), + ), + ) + assert util.get_install_task_flavor(config) == 'notcmalloc' + + def test_get_install_task_flavor_override_project(self): + config = dict( + tasks=[ + dict(install=dict()), + ], + overrides=dict( + install=dict( + ceph=dict( + flavor='notcmalloc', + ), + ), + ), + ) + assert util.get_install_task_flavor(config) == 'notcmalloc' + + +class TestMissingPackages(object): + """ + Tests the functionality that checks to see if a + scheduled job will have missing packages in gitbuilder. + """ + def setup(self): + package_versions = { + 'sha1': { + 'ubuntu': { + '14.04': { + 'basic': '1.0' + } + } + } + } + self.pv = package_versions + + def test_os_in_package_versions(self): + assert self.pv == util.get_package_versions( + "sha1", + "ubuntu", + "14.04", + "basic", + package_versions=self.pv + ) + + @patch("teuthology.suite.util.package_version_for_hash") + def test_os_not_in_package_versions(self, m_package_versions_for_hash): + m_package_versions_for_hash.return_value = "1.1" + result = util.get_package_versions( + "sha1", + "rhel", + "7.0", + "basic", + package_versions=self.pv + ) + expected = deepcopy(self.pv) + expected['sha1'].update( + { + 'rhel': { + '7.0': { + 'basic': '1.1' + } + } + } + ) + assert result == expected + + @patch("teuthology.suite.util.package_version_for_hash") + def test_package_versions_not_found(self, m_package_versions_for_hash): + # if gitbuilder returns a status that's not a 200, None is returned + m_package_versions_for_hash.return_value = None + result = util.get_package_versions( + "sha1", + "rhel", + "7.0", + "basic", + package_versions=self.pv + ) + assert result == self.pv + + @patch("teuthology.suite.util.package_version_for_hash") + def test_no_package_versions_kwarg(self, m_package_versions_for_hash): + m_package_versions_for_hash.return_value = "1.0" + result = util.get_package_versions( + "sha1", + "ubuntu", + "14.04", + "basic", + ) + expected = deepcopy(self.pv) + assert result == expected + + def test_distro_has_packages(self): + result = util.has_packages_for_distro( + "sha1", + "ubuntu", + "14.04", + "basic", + package_versions=self.pv, + ) + assert result + + def test_distro_does_not_have_packages(self): + result = util.has_packages_for_distro( + "sha1", + "rhel", + "7.0", + "basic", + package_versions=self.pv, + ) + assert not result + + @patch("teuthology.suite.util.get_package_versions") + def test_has_packages_no_package_versions(self, m_get_package_versions): + m_get_package_versions.return_value = self.pv + result = util.has_packages_for_distro( + "sha1", + "rhel", + "7.0", + "basic",) + assert not result + + +class TestDistroDefaults(object): + def setup(self): + config.use_shaman = False + + def 
test_distro_defaults_saya(self): + expected = ('armv7l', 'saucy', + OS(name='ubuntu', version='13.10', codename='saucy')) + assert util.get_distro_defaults('ubuntu', 'saya') == expected + + def test_distro_defaults_plana(self): + expected = ('x86_64', 'xenial', + OS(name='ubuntu', version='16.04', codename='xenial')) + assert util.get_distro_defaults('ubuntu', 'plana') == expected + + def test_distro_defaults_debian(self): + expected = ('x86_64', 'wheezy', + OS(name='debian', version='7', codename='wheezy')) + assert util.get_distro_defaults('debian', 'magna') == expected + + def test_distro_defaults_centos(self): + expected = ('x86_64', 'centos7', + OS(name='centos', version='7', codename='core')) + assert util.get_distro_defaults('centos', 'magna') == expected + + def test_distro_defaults_fedora(self): + expected = ('x86_64', 'fedora20', + OS(name='fedora', version='20', codename='heisenbug')) + assert util.get_distro_defaults('fedora', 'magna') == expected + + def test_distro_defaults_default(self): + expected = ('x86_64', 'centos7', + OS(name='centos', version='7', codename='core')) + assert util.get_distro_defaults('rhel', 'magna') == expected diff --git a/teuthology/suite/util.py b/teuthology/suite/util.py new file mode 100644 index 0000000000..1955f85af5 --- /dev/null +++ b/teuthology/suite/util.py @@ -0,0 +1,499 @@ +import copy +import logging +import os +import requests +import smtplib +import socket +from subprocess import Popen, PIPE, DEVNULL +import sys + +from email.mime.text import MIMEText + +import teuthology.lock.query +import teuthology.lock.util +from teuthology import repo_utils + +from teuthology.config import config +from teuthology.exceptions import BranchNotFoundError, ScheduleFailError +from teuthology.misc import deep_merge +from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology +from teuthology.orchestra.opsys import OS +from teuthology.packaging import get_builder_project +from teuthology.repo_utils import build_git_url +from teuthology.task.install import get_flavor + +log = logging.getLogger(__name__) + +CONTAINER_DISTRO = 'centos/8' # the one to check for build_complete +CONTAINER_FLAVOR = 'default' + + +def fetch_repos(branch, test_name, dry_run): + """ + Fetch the suite repo (and also the teuthology repo) so that we can use it + to build jobs. Repos are stored in ~/src/. + + The reason the teuthology repo is also fetched is that currently we use + subprocess to call teuthology-schedule to schedule jobs so we need to make + sure it is up-to-date. For that reason we always fetch the main branch + for test scheduling, regardless of what teuthology branch is requested for + testing. + + :returns: The path to the suite repo on disk + """ + try: + # When a user is scheduling a test run from their own copy of + # teuthology, let's not wreak havoc on it. + if config.automated_scheduling: + # We use teuthology's main branch in all cases right now + if config.teuthology_path is None: + fetch_teuthology('main') + suite_repo_path = fetch_qa_suite(branch) + except BranchNotFoundError as exc: + schedule_fail(message=str(exc), name=test_name, dry_run=dry_run) + return suite_repo_path + + +def schedule_fail(message, name='', dry_run=None): + """ + If an email address has been specified anywhere, send an alert there. Then + raise a ScheduleFailError. + Don't send the mail if --dry-run has been passed. 
+ """ + email = config.results_email + if email and not dry_run: + subject = "Failed to schedule {name}".format(name=name) + msg = MIMEText(message) + msg['Subject'] = subject + msg['From'] = config.results_sending_email + msg['To'] = email + try: + smtp = smtplib.SMTP('localhost') + smtp.sendmail(msg['From'], [msg['To']], msg.as_string()) + smtp.quit() + except socket.error: + log.exception("Failed to connect to mail server!") + raise ScheduleFailError(message, name) + + +def get_worker(machine_type): + """ + Map a given machine_type to a beanstalkd worker. If machine_type mentions + multiple machine types - e.g. 'plana,mira', then this returns 'multi'. + Otherwise it returns what was passed. + """ + if ',' in machine_type: + return 'multi' + else: + return machine_type + + +def get_gitbuilder_hash(project=None, branch=None, flavor=None, + machine_type=None, distro=None, + distro_version=None): + """ + Find the hash representing the head of the project's repository via + querying a gitbuilder repo. + + Will return None in the case of a 404 or any other HTTP error. + """ + # Alternate method for github-hosted projects - left here for informational + # purposes + # resp = requests.get( + # 'https://api.github.com/repos/ceph/ceph/git/refs/heads/main') + # hash = .json()['object']['sha'] + (arch, release, _os) = get_distro_defaults(distro, machine_type) + if distro is None: + distro = _os.name + bp = get_builder_project()( + project, + dict( + branch=branch, + flavor=flavor, + os_type=distro, + os_version=distro_version, + arch=arch, + ), + ) + return bp.sha1 + + +def get_distro_defaults(distro, machine_type): + """ + Given a distro (e.g. 'ubuntu') and machine type, return: + (arch, release, pkg_type) + + This is used to default to: + ('x86_64', 'trusty', 'deb') when passed 'ubuntu' and 'plana' + ('armv7l', 'saucy', 'deb') when passed 'ubuntu' and 'saya' + ('x86_64', 'wheezy', 'deb') when passed 'debian' + ('x86_64', 'fedora20', 'rpm') when passed 'fedora' + And ('x86_64', 'centos7', 'rpm') when passed anything else + """ + arch = 'x86_64' + if distro in (None, 'None'): + os_type = 'centos' + os_version = '7' + elif distro in ('rhel', 'centos'): + os_type = 'centos' + os_version = '7' + elif distro == 'ubuntu': + os_type = distro + if machine_type == 'saya': + os_version = '13.10' + arch = 'armv7l' + else: + os_version = '16.04' + elif distro == 'debian': + os_type = distro + os_version = '7' + elif distro == 'fedora': + os_type = distro + os_version = '20' + elif distro == 'opensuse': + os_type = distro + os_version = '15.1' + else: + raise ValueError("Invalid distro value passed: %s", distro) + _os = OS(name=os_type, version=os_version) + release = get_builder_project()._get_distro( + _os.name, + _os.version, + _os.codename, + ) + template = "Defaults for machine_type {mtype} distro {distro}: " \ + "arch={arch}, release={release}, pkg_type={pkg}" + log.debug(template.format( + mtype=machine_type, + distro=_os.name, + arch=arch, + release=release, + pkg=_os.package_type) + ) + return ( + arch, + release, + _os, + ) + + +def git_ls_remote(project_or_url, branch, project_owner='ceph'): + """ + Find the latest sha1 for a given project's branch. + + :param project_or_url: Either a project name or a full URL + :param branch: The branch to query + :param project_owner: The GitHub project owner. 
Only used when a project + name is passed; not when a URL is passed + :returns: The sha1 if found; else None + """ + if '://' in project_or_url or project_or_url.startswith('git@'): + url = project_or_url + else: + url = build_git_url(project_or_url, project_owner) + return repo_utils.ls_remote(url, branch) + + +def git_validate_sha1(project, sha1, project_owner='ceph'): + ''' + Use http to validate that project contains sha1 + I can't find a way to do this with git, period, so + we have specific urls to HEAD for github and git.ceph.com/gitweb + for now + ''' + url = build_git_url(project, project_owner) + + if '/github.com/' in url: + url = '/'.join((url, 'commit', sha1)) + elif '/git.ceph.com/' in url: + # kinda specific to knowing git.ceph.com is gitweb + url = ('http://git.ceph.com/?p=%s.git;a=blob_plain;f=.gitignore;hb=%s' + % (project, sha1)) + else: + raise RuntimeError( + 'git_validate_sha1: how do I check %s for a sha1?' % url + ) + + resp = requests.head(url) + if resp.ok: + return sha1 + return None + + +def git_branch_exists(project_or_url, branch, project_owner='ceph'): + """ + Query the git repository to check the existence of a project's branch + + :param project_or_url: Either a project name or a full URL + :param branch: The branch to query + :param project_owner: The GitHub project owner. Only used when a project + name is passed; not when a URL is passed + """ + return git_ls_remote(project_or_url, branch, project_owner) is not None + + +def get_branch_info(project, branch, project_owner='ceph'): + """ + NOTE: This is currently not being used because of GitHub's API rate + limiting. We use github_branch_exists() instead. + + Use the GitHub API to query a project's branch. Returns: + {u'object': {u'sha': , + u'type': , + u'url': }, + u'ref': u'refs/heads/', + u'url': } + + We mainly use this to check if a branch exists. + """ + url_templ = 'https://api.github.com/repos/{project_owner}/{project}/git/refs/heads/{branch}' # noqa + url = url_templ.format(project_owner=project_owner, project=project, + branch=branch) + resp = requests.get(url) + if resp.ok: + return resp.json() + + +def package_version_for_hash(hash, flavor='default', distro='rhel', + distro_version='8.0', machine_type='smithi'): + """ + Does what it says on the tin. Uses gitbuilder repos. + + :returns: a string. + """ + (arch, release, _os) = get_distro_defaults(distro, machine_type) + if distro in (None, 'None'): + distro = _os.name + bp = get_builder_project()( + 'ceph', + dict( + flavor=flavor, + os_type=distro, + os_version=distro_version, + arch=arch, + sha1=hash, + ), + ) + + if bp.distro == CONTAINER_DISTRO and bp.flavor == CONTAINER_FLAVOR: + log.info('container build %s, checking for build_complete' % bp.distro) + if not bp.build_complete: + log.info('build not complete') + return None + + return bp.version + + +def get_arch(machine_type): + """ + Based on a given machine_type, return its architecture by querying the lock + server. + + :returns: A string or None + """ + result = teuthology.lock.query.list_locks(machine_type=machine_type, count=1) + if not result: + log.warning("No machines found with machine_type %s!", machine_type) + else: + return result[0]['arch'] + + +def strip_fragment_path(original_path): + """ + Given a path, remove the text before '/suites/'. 
Part of the fix for + http://tracker.ceph.com/issues/15470 + """ + scan_after = '/suites/' + scan_start = original_path.find(scan_after) + if scan_start > 0: + return original_path[scan_start + len(scan_after):] + return original_path + + +def get_install_task_flavor(job_config): + """ + Pokes through the install task's configuration (including its overrides) to + figure out which flavor it will want to install. + + Only looks at the first instance of the install task in job_config. + """ + project, = job_config.get('project', 'ceph'), + tasks = job_config.get('tasks', dict()) + overrides = job_config.get('overrides', dict()) + install_overrides = overrides.get('install', dict()) + project_overrides = install_overrides.get(project, dict()) + first_install_config = dict() + for task in tasks: + if list(task.keys())[0] == 'install': + first_install_config = list(task.values())[0] or dict() + break + first_install_config = copy.deepcopy(first_install_config) + deep_merge(first_install_config, install_overrides) + deep_merge(first_install_config, project_overrides) + return get_flavor(first_install_config) + + +def get_package_versions(sha1, os_type, os_version, flavor, + package_versions=None): + """ + Will retrieve the package versions for the given sha1, os_type/version, + and flavor from gitbuilder. + + Optionally, a package_versions dict can be provided + from previous calls to this function to avoid calling gitbuilder for + information we've already retrieved. + + The package_versions dict will be in the following format:: + + { + "sha1": { + "ubuntu": { + "14.04": { + "basic": "version", + } + "15.04": { + "notcmalloc": "version", + } + } + "rhel": { + "basic": "version", + } + }, + "another-sha1": { + "ubuntu": { + "basic": "version", + } + } + } + + :param sha1: The sha1 hash of the ceph version. + :param os_type: The distro we want to get packages for, given + the ceph sha1. Ex. 'ubuntu', 'rhel', etc. + :param os_version: The distro's version, e.g. '14.04', '7.0' + :param flavor: Package flavor ('testing', 'notcmalloc', etc.) + :param package_versions: Use this optionally to use cached results of + previous calls to gitbuilder. + :returns: A dict of package versions. Will return versions + for all hashes/distros/vers, not just for the given + hash/distro/ver. + """ + if package_versions is None: + package_versions = dict() + + os_type = str(os_type) + + os_types = package_versions.get(sha1, dict()) + os_versions = os_types.get(os_type, dict()) + flavors = os_versions.get(os_version, dict()) + if flavor not in flavors: + package_version = package_version_for_hash( + sha1, + flavor, + distro=os_type, + distro_version=os_version, + ) + flavors[flavor] = package_version + os_versions[os_version] = flavors + os_types[os_type] = os_versions + package_versions[sha1] = os_types + + return package_versions + + +def has_packages_for_distro(sha1, os_type, os_version, flavor, + package_versions=None): + """ + Checks to see if gitbuilder has packages for the given sha1, os_type and + flavor. + + See above for package_versions description. + + :param sha1: The sha1 hash of the ceph version. + :param os_type: The distro we want to get packages for, given + the ceph sha1. Ex. 'ubuntu', 'rhel', etc. + :param flavor: The ceph packages shaman flavor + :param package_versions: Use this optionally to use cached results of + previous calls to gitbuilder. + :returns: True, if packages are found. False otherwise. 
+ """ + os_type = str(os_type) + if package_versions is None: + package_versions = get_package_versions( + sha1, os_type, os_version, flavor) + + flavors = package_versions.get(sha1, dict()).get( + os_type, dict()).get( + os_version, dict()) + # we want to return a boolean here, not the actual package versions + return bool(flavors.get(flavor, None)) + + +def teuthology_schedule(args, verbose, dry_run, log_prefix='', stdin=None): + """ + Run teuthology-schedule to schedule individual jobs. + + If --dry-run has been passed but --verbose has been passed just once, don't + actually run the command - only print what would be executed. + + If --dry-run has been passed and --verbose has been passed multiple times, + do both. + """ + exec_path = os.path.join( + os.path.dirname(sys.argv[0]), + 'teuthology-schedule') + args.insert(0, exec_path) + if dry_run: + # Quote any individual args so that individual commands can be copied + # and pasted in order to execute them individually. + printable_args = [] + for item in args: + if ' ' in item: + printable_args.append("'%s'" % item) + else: + printable_args.append(item) + log.info('{0}{1}'.format( + log_prefix, + ' '.join(printable_args), + )) + if not dry_run or (dry_run and verbose > 1): + astdin = DEVNULL if stdin is None else PIPE + p = Popen(args, stdin=astdin) + if stdin is not None: + p.communicate(input=stdin.encode('utf-8')) + else: + p.communicate() + +def find_git_parent(project, sha1): + + base_url = config.githelper_base_url + if not base_url: + log.warning('githelper_base_url not set, --newest disabled') + return None + + def refresh(project): + url = '%s/%s.git/refresh' % (base_url, project) + resp = requests.get(url) + if not resp.ok: + log.error('git refresh failed for %s: %s', + project, resp.content.decode()) + + def get_sha1s(project, committish, count): + url = '/'.join((base_url, '%s.git' % project, + 'history/?committish=%s&count=%d' % (committish, count))) + resp = requests.get(url) + resp.raise_for_status() + sha1s = resp.json()['sha1s'] + if len(sha1s) != count: + log.debug('got response: %s', resp.json()) + log.error('can''t find %d parents of %s in %s: %s', + int(count), sha1, project, resp.json()['error']) + return sha1s + + # XXX don't do this every time?.. + refresh(project) + # we want the one just before sha1; list two, return the second + sha1s = get_sha1s(project, sha1, 2) + if len(sha1s) == 2: + return sha1s[1] + else: + return None diff --git a/teuthology/task/__init__.py b/teuthology/task/__init__.py new file mode 100644 index 0000000000..eb1d04c8b8 --- /dev/null +++ b/teuthology/task/__init__.py @@ -0,0 +1,136 @@ +import logging + +from teuthology.misc import deep_merge +from teuthology.orchestra.cluster import Cluster + +log = logging.getLogger(__name__) + + +class Task(object): + """ + A base-class for "new-style" teuthology tasks. + + Can be used as a drop-in replacement for the old-style task functions with + @contextmanager decorators. + + Note: While looking up overrides, we use the lowercase name of the class by + default. 
While this works well for the main task in a module, other + tasks or 'subtasks' may want to override that name using a class + variable called 'name' e.g.: + + class MyTask(Task): + pass + class MySubtask(MyTask): + name = 'mytask.mysubtask' + """ + + def __init__(self, ctx=None, config=None): + if not hasattr(self, 'name'): + self.name = self.__class__.__name__.lower() + self.log = log + self.ctx = ctx + self.config = config or dict() + if not isinstance(self.config, dict): + raise TypeError("config must be a dict") + self.apply_overrides() + self.filter_hosts() + + def apply_overrides(self): + """ + Look for an 'overrides' dict in self.ctx.config; look inside that for a + dict with the same name as this task. Override any settings in + self.config with those overrides + """ + if not hasattr(self.ctx, 'config'): + return + all_overrides = self.ctx.config.get('overrides', dict()) + if not all_overrides: + return + task_overrides = all_overrides.get(self.name) + if task_overrides: + self.log.debug( + "Applying overrides for task {name}: {overrides}".format( + name=self.name, overrides=task_overrides) + ) + deep_merge(self.config, task_overrides) + + def filter_hosts(self): + """ + Look for a 'hosts' list in self.config. Each item in the list may + either be a role or a hostname. Builds a new Cluster object containing + only those hosts which match one (or more) of the roles or hostnames + specified. The filtered Cluster object is stored as self.cluster so + that the task may only run against those hosts. + """ + if not hasattr(self.ctx, 'cluster'): + return + elif 'hosts' not in self.config: + self.cluster = self.ctx.cluster + return self.cluster + host_specs = self.config.get('hosts', list()) + cluster = Cluster() + for host_spec in host_specs: + role_matches = self.ctx.cluster.only(host_spec) + if len(role_matches.remotes) > 0: + for (remote, roles) in role_matches.remotes.items(): + cluster.add(remote, roles) + elif isinstance(host_spec, str): + for (remote, roles) in self.ctx.cluster.remotes.items(): + if remote.name.split('@')[-1] == host_spec or \ + remote.shortname == host_spec: + cluster.add(remote, roles) + if not cluster.remotes: + raise RuntimeError("All target hosts were excluded!") + self.cluster = cluster + hostnames = [h.shortname for h in self.cluster.remotes.keys()] + self.log.debug("Restricting task {name} to hosts: {hosts}".format( + name=self.name, hosts=' '.join(hostnames)) + ) + return self.cluster + + def setup(self): + """ + Perform any setup that is needed by the task before it executes + """ + pass + + def begin(self): + """ + Execute the main functionality of the task + """ + pass + + def end(self): + """ + Perform any work needed to stop processes started in begin() + """ + pass + + def teardown(self): + """ + Perform any work needed to restore configuration to a previous state. + + Can be skipped by setting 'skip_teardown' to True in self.config + """ + pass + + def __enter__(self): + """ + When using an instance of the class as a context manager, this method + calls self.setup(), then calls self.begin() and returns self. 
+ """ + self.setup() + self.begin() + return self + + def __exit__(self, type_, value, traceback): + """ + When using an instance of the class as a context manager, this method + calls self.end() and self.teardown() - unless + self.config['skip_teardown'] is True + """ + self.end() + if self.config.get('skip_teardown', False): + self.log.info("Skipping teardown") + else: + self.teardown() diff --git a/teuthology/task/ansible.py b/teuthology/task/ansible.py new file mode 100644 index 0000000000..a89ec911d9 --- /dev/null +++ b/teuthology/task/ansible.py @@ -0,0 +1,429 @@ +import json +import logging +import requests +import os +import pexpect +import yaml +import shutil + +from tempfile import mkdtemp, NamedTemporaryFile + +from teuthology.config import config as teuth_config +from teuthology.exceptions import CommandFailedError, AnsibleFailedError +from teuthology.job_status import set_status +from teuthology.repo_utils import fetch_repo + +from teuthology.task import Task + +log = logging.getLogger(__name__) + +class LoggerFile(object): + """ + A thin wrapper around a logging.Logger instance that provides a file-like + interface. + + Used by Ansible.execute_playbook() when it calls pexpect.run() + """ + def __init__(self, logger, level): + self.logger = logger + self.level = level + + def write(self, string): + self.logger.log(self.level, string.decode('utf-8', 'ignore')) + + def flush(self): + pass + + +class Ansible(Task): + """ + A task to run ansible playbooks + + Required configuration parameters: + playbook: Required; can either be a list of plays, or a path/URL to a + playbook. In the case of a path, it may be relative to the + repo's on-disk location (if a repo is provided), or + teuthology's working directory. + + Optional configuration parameters: + repo: A path or URL to a repo (defaults to '.'). Given a repo + value of 'foo', ANSIBLE_ROLES_PATH is set to 'foo/roles' + branch: If pointing to a remote git repo, use this branch. Defaults + to 'main'. + hosts: A list of teuthology roles or partial hostnames (or a + combination of the two). ansible-playbook will only be run + against hosts that match. + inventory: A path to be passed to ansible-playbook with the + --inventory-file flag; useful for playbooks that also have + vars they need access to. If this is not set, we check for + /etc/ansible/hosts and use that if it exists. If it does + not, we generate a temporary file to use. + tags: A string including any (comma-separated) tags to be passed + directly to ansible-playbook. + skip_tags: A string of comma-separated tags that will be skipped by + passing them to ansible-playbook using --skip-tags. + vars: A dict of vars to be passed to ansible-playbook via the + --extra-vars flag + group_vars: A dict with keys matching relevant group names in the + playbook, and values to be written in the corresponding + inventory's group_vars files. Only applies to inventories + generated by this task. + cleanup: If present, the given or generated playbook will be run + again during teardown with a 'cleanup' var set to True. + This will allow the playbook to clean up after itself, + if the playbook supports this feature. + reconnect: If set to True (the default), then reconnect to hosts after + ansible-playbook completes. This is in case the playbook + makes changes to the SSH configuration, or user accounts - + we would want to reflect those changes immediately. 
+ + Examples: + + tasks: + - ansible: + repo: https://github.com/ceph/ceph-cm-ansible.git + playbook: + - roles: + - some_role + - another_role + hosts: + - client.0 + - host1 + + tasks: + - ansible: + repo: /path/to/repo + inventory: /path/to/inventory + playbook: /path/to/playbook.yml + tags: my_tags + skip_tags: my_skipped_tags + vars: + var1: string_value + var2: + - list_item + var3: + key: value + + """ + # set this in subclasses to provide a group to + # assign hosts to for dynamic inventory creation + inventory_group = None + + def __init__(self, ctx, config): + super(Ansible, self).__init__(ctx, config) + self.generated_inventory = False + self.generated_playbook = False + self.log = logging.Logger(__name__) + if ctx.archive: + self.log.addHandler(logging.FileHandler( + os.path.join(ctx.archive, "ansible.log"))) + + def setup(self): + super(Ansible, self).setup() + self.find_repo() + self.get_playbook() + self.get_inventory() or self.generate_inventory() + if not hasattr(self, 'playbook_file'): + self.generate_playbook() + + @property + def failure_log(self): + if not hasattr(self, '_failure_log'): + self._failure_log = NamedTemporaryFile( + prefix="teuth_ansible_failures_", + delete=False, + ) + return self._failure_log + + def find_repo(self): + """ + Locate the repo we're using; cloning it from a remote repo if necessary + """ + repo = self.config.get('repo', '.') + if repo.startswith(('http://', 'https://', 'git@', 'git://')): + repo_path = fetch_repo( + repo, + self.config.get('branch', 'main'), + ) + else: + repo_path = os.path.abspath(os.path.expanduser(repo)) + self.repo_path = repo_path + + def get_playbook(self): + """ + If necessary, fetch and read the playbook file + """ + playbook = self.config['playbook'] + if isinstance(playbook, list): + # Multiple plays in a list + self.playbook = playbook + elif isinstance(playbook, str) and playbook.startswith(('http://', + 'https://')): + response = requests.get(playbook) + response.raise_for_status() + self.playbook = yaml.safe_load(response.text) + elif isinstance(playbook, str): + try: + playbook_path = os.path.expanduser(playbook) + if not playbook_path.startswith('/'): + # If the path is not absolute at this point, look for the + # playbook in the repo dir. If it's not there, we assume + # the path is relative to the working directory + pb_in_repo = os.path.join(self.repo_path, playbook_path) + if os.path.exists(pb_in_repo): + playbook_path = pb_in_repo + self.playbook_file = open(playbook_path) + playbook_yaml = yaml.safe_load(self.playbook_file) + self.playbook = playbook_yaml + except Exception: + log.error("Unable to read playbook file %s", playbook) + raise + else: + raise TypeError( + "playbook value must either be a list, URL or a filename") + log.info("Playbook: %s", self.playbook) + + def get_inventory(self): + """ + Determine whether or not we're using an existing inventory file + """ + self.inventory = self.config.get('inventory') + etc_ansible_hosts = '/etc/ansible/hosts' + if self.inventory: + self.inventory = os.path.expanduser(self.inventory) + elif os.path.exists(etc_ansible_hosts): + self.inventory = etc_ansible_hosts + return self.inventory + + def generate_inventory(self): + """ + Generate a hosts (inventory) file to use. This should not be called if + we're using an existing file. 
+ """ + hosts = self.cluster.remotes.keys() + hostnames = [remote.hostname for remote in hosts] + hostnames.sort() + inventory = [] + if self.inventory_group: + inventory.append('[{0}]'.format(self.inventory_group)) + inventory.extend(hostnames + ['']) + hosts_str = '\n'.join(inventory) + self.inventory = self._write_inventory_files(hosts_str) + self.generated_inventory = True + + def _write_inventory_files(self, inventory, inv_suffix=''): + """ + Actually write the inventory files. Writes out group_vars files as + necessary based on configuration. + + :param inventory: The content of the inventory file itself, as a + string + :param inv_suffix: The suffix to use for the inventory filename + """ + # First, create the inventory directory + inventory_dir = mkdtemp( + prefix="teuth_ansible_inventory", + ) + inv_fn = os.path.join(inventory_dir, 'inventory') + if inv_suffix: + inv_fn = '.'.join(inv_fn, inv_suffix) + # Write out the inventory file + inv_file = open(inv_fn, 'w') + inv_file.write(inventory) + # Next, write the group_vars files + all_group_vars = self.config.get('group_vars') + if all_group_vars: + group_vars_dir = os.path.join(inventory_dir, 'group_vars') + os.mkdir(group_vars_dir) + # We loop over a sorted list of keys here because we want our tests + # to be able to mock predictably + for group_name in sorted(all_group_vars): + group_vars = all_group_vars[group_name] + path = os.path.join(group_vars_dir, group_name + '.yml') + gv_file = open(path, 'w') + yaml.safe_dump(group_vars, gv_file) + + return inventory_dir + + def generate_playbook(self): + """ + Generate a playbook file to use. This should not be called if we're + using an existing file. + """ + playbook_file = NamedTemporaryFile( + prefix="teuth_ansible_playbook_", + dir=self.repo_path, + delete=False, + ) + yaml.safe_dump(self.playbook, playbook_file, explicit_start=True) + playbook_file.flush() + self.playbook_file = playbook_file + self.generated_playbook = True + + def begin(self): + super(Ansible, self).begin() + self.execute_playbook() + + def execute_playbook(self, _logfile=None): + """ + Execute ansible-playbook + + :param _logfile: Use this file-like object instead of a LoggerFile for + testing + """ + environ = os.environ + environ['ANSIBLE_SSH_PIPELINING'] = '1' + environ['ANSIBLE_FAILURE_LOG'] = self.failure_log.name + environ['ANSIBLE_ROLES_PATH'] = "%s/roles" % self.repo_path + environ['ANSIBLE_NOCOLOR'] = "1" + args = self._build_args() + command = ' '.join(args) + log.debug("Running %s", command) + + out, status = pexpect.run( + command, + cwd=self.repo_path, + logfile=_logfile or LoggerFile(self.log, logging.INFO), + withexitstatus=True, + timeout=None, + ) + if status != 0: + self._handle_failure(command, status) + + if self.config.get('reconnect', True) is True: + remotes = list(self.cluster.remotes) + log.debug("Reconnecting to %s", remotes) + for remote in remotes: + remote.reconnect() + + def _handle_failure(self, command, status): + self._set_status('dead') + failures = None + with open(self.failure_log.name, 'r') as fail_log: + try: + failures = yaml.safe_load(fail_log) + except yaml.YAMLError as e: + log.error( + "Failed to parse ansible failure log: {0} ({1})".format( + self.failure_log.name, e + ) + ) + fail_log.seek(0) + failures = fail_log.read().replace('\n', '') + + if failures: + self._archive_failures() + raise AnsibleFailedError(failures) + raise CommandFailedError(command, status) + + def _set_status(self, status): + """ + Not implemented in the base class + """ + pass + + def 
_archive_failures(self): + if self.ctx.archive: + archive_path = "{0}/ansible_failures.yaml".format(self.ctx.archive) + log.info("Archiving ansible failure log at: {0}".format( + archive_path, + )) + shutil.move( + self.failure_log.name, + archive_path + ) + os.chmod(archive_path, 0o664) + + def _build_args(self): + """ + Assemble the list of args to be executed + """ + fqdns = [r.hostname for r in self.cluster.remotes.keys()] + # Assume all remotes use the same username + user = list(self.cluster.remotes)[0].user + extra_vars = dict(ansible_ssh_user=user) + extra_vars.update(self.config.get('vars', dict())) + args = [ + 'ansible-playbook', '-v', + "--extra-vars", "'%s'" % json.dumps(extra_vars), + '-i', self.inventory, + '--limit', ','.join(fqdns), + self.playbook_file.name, + ] + tags = self.config.get('tags') + if tags: + args.extend(['--tags', tags]) + skip_tags = self.config.get('skip_tags') + if skip_tags: + args.extend(['--skip-tags', skip_tags]) + return args + + def teardown(self): + self._cleanup() + if self.generated_inventory: + shutil.rmtree(self.inventory) + if self.generated_playbook: + os.remove(self.playbook_file.name) + super(Ansible, self).teardown() + + def _cleanup(self): + """ + If the ``cleanup`` key exists in config the same playbook will be + run again during the teardown step with the var ``cleanup`` given with + a value of ``True``. If supported, this will allow the playbook to + cleanup after itself during teardown. + """ + if self.config.get("cleanup"): + log.info("Running ansible cleanup...") + extra = dict(cleanup=True) + if self.config.get('vars'): + self.config.get('vars').update(extra) + else: + self.config['vars'] = extra + self.execute_playbook() + else: + log.info("Skipping ansible cleanup...") + + +class CephLab(Ansible): + __doc__ = """ + A very simple subclass of Ansible that defaults to: + + - ansible.cephlab: + repo: {git_base}ceph-cm-ansible.git + branch: main + playbook: cephlab.yml + + If a dynamic inventory is used, all hosts will be assigned to the + group 'testnodes'. + """.format(git_base=teuth_config.ceph_git_base_url) + + # Set the name so that Task knows to look up overrides for + # 'ansible.cephlab' instead of just 'cephlab' + name = 'ansible.cephlab' + inventory_group = 'testnodes' + + def __init__(self, ctx, config): + config = config or dict() + if 'playbook' not in config: + config['playbook'] = 'cephlab.yml' + if 'repo' not in config: + config['repo'] = teuth_config.get_ceph_cm_ansible_git_url() + super(CephLab, self).__init__(ctx, config) + + def begin(self): + # Write foo to ~/.vault_pass.txt if it's missing. + # In almost all cases we don't need the actual vault password. + # Touching an empty file broke as of Ansible 2.4 + vault_pass_path = os.path.expanduser('~/.vault_pass.txt') + if not os.path.exists(vault_pass_path): + with open(vault_pass_path, 'w') as f: + f.write('foo') + super(CephLab, self).begin() + + def _set_status(self, status): + set_status(self.ctx.summary, status) + + +task = Ansible +cephlab = CephLab diff --git a/teuthology/task/args.py b/teuthology/task/args.py new file mode 100644 index 0000000000..17e9e9dc00 --- /dev/null +++ b/teuthology/task/args.py @@ -0,0 +1,60 @@ +""" +These routines only appear to be used by the peering_speed tests. +""" +def gen_args(name, args): + """ + Called from argify to generate arguments. 
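A minimal, hypothetical illustration of the (usage, ret) pair this returns; the task name, key, and default are invented:

    usage, build = gen_args('mytask', [('num_pgs', 'number of PGs', 2048, int)])
    obj = build({'num_pgs': '4096'})   # obj.num_pgs == 4096 (converted by int)
    obj = build({})                    # obj.num_pgs == 2048 (the converted default)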
+ """ + usage = [""] + usage += [name + ':'] + usage += \ + [" {key}: <{usage}> ({default})".format( + key=key, usage=_usage, default=default) + for (key, _usage, default, _) in args] + usage.append('') + usage.append(name + ':') + usage += \ + [" {key}: {default}".format( + key = key, default = default) + for (key, _, default, _) in args] + usage = '\n'.join(' ' + i for i in usage) + def ret(config): + """ + return an object with attributes set from args. + """ + class Object(object): + """ + simple object + """ + pass + obj = Object() + for (key, usage, default, conv) in args: + if key in config: + setattr(obj, key, conv(config[key])) + else: + setattr(obj, key, conv(default)) + return obj + return usage, ret + +def argify(name, args): + """ + Object used as a decorator for the peering speed tests. + See peering_spee_test.py + """ + (usage, config_func) = gen_args(name, args) + def ret1(f): + """ + Wrapper to handle doc and usage information + """ + def ret2(**kwargs): + """ + Call f (the parameter passed to ret1) + """ + config = kwargs.get('config', {}) + if config is None: + config = {} + kwargs['config'] = config_func(config) + return f(**kwargs) + ret2.__doc__ = f.__doc__ + usage + return ret2 + return ret1 diff --git a/teuthology/task/background_exec.py b/teuthology/task/background_exec.py new file mode 100644 index 0000000000..897b525312 --- /dev/null +++ b/teuthology/task/background_exec.py @@ -0,0 +1,76 @@ +""" +Background task +""" + +import contextlib +import logging + +from teuthology import misc +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run a background task. + + Run the given command on a client, similar to exec. However, when + we hit the finally because the subsequent task is ready to exit, kill + the child process. + + We do not do any error code checking here since we are forcefully killing + off the child when we are done. + + If the command a list, we simply join it with ;'s. 
+ + Example:: + + tasks: + - install: + - background_exec: + client.0: while true ; do date ; sleep 1 ; done + client.1: + - while true + - do id + - sleep 1 + - done + - exec: + client.0: + - sleep 10 + + """ + assert isinstance(config, dict), "task background got invalid config" + + testdir = misc.get_testdir(ctx) + + tasks = {} + for role, cmd in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Running background command on role %s host %s', role, + remote.name) + if isinstance(cmd, list): + cmd = '; '.join(cmd) + cmd.replace('$TESTDIR', testdir) + tasks[remote.name] = remote.run( + args=[ + 'sudo', + 'TESTDIR=%s' % testdir, + 'daemon-helper', 'kill', '--kill-group', + 'bash', '-c', cmd, + ], + wait=False, + stdin=run.PIPE, + check_status=False, + logger=log.getChild(remote.name) + ) + + try: + yield + + finally: + for name, task in tasks.items(): + log.info('Stopping background command on %s', name) + task.stdin.close() + run.wait(tasks.values()) diff --git a/teuthology/task/buildpackages.py b/teuthology/task/buildpackages.py new file mode 100644 index 0000000000..ae56af01fa --- /dev/null +++ b/teuthology/task/buildpackages.py @@ -0,0 +1,245 @@ +""" +Build ceph packages + +Unit tests: + +py.test -v -s tests/test_buildpackages.py + +Integration tests: + +teuthology-openstack --verbose --key-name myself --key-filename ~/Downloads/myself --ceph infernalis --suite teuthology/buildpackages + +""" +import copy +import logging +import os +import types +from teuthology import packaging +from teuthology import misc +from teuthology.config import config as teuth_config +from teuthology.openstack import OpenStack + +log = logging.getLogger(__name__) + +class LocalGitbuilderProject(packaging.GitbuilderProject): + + def __init__(self): + pass + + +def get_pkg_type(os_type): + if os_type in ('centos', 'fedora', 'opensuse', 'rhel', 'sle'): + return 'rpm' + else: + return 'deb' + +def apply_overrides(ctx, config): + if config is None: + config = {} + else: + config = copy.deepcopy(config) + + assert isinstance(config, dict), \ + "task install only supports a dictionary for configuration" + + project, = config.get('project', 'ceph'), + log.debug('project %s' % project) + overrides = ctx.config.get('overrides') + if overrides: + install_overrides = overrides.get('install', {}) + misc.deep_merge(config, install_overrides.get(project, {})) + return config + +def get_config_install(ctx, config): + config = apply_overrides(ctx, config) + log.debug('install config %s' % config) + return [(config.get('flavor', 'default'), + config.get('tag', ''), + config.get('branch', ''), + config.get('sha1'))] + +def get_config_install_upgrade(ctx, config): + log.debug('install.upgrade config before override %s' % config) + configs = [] + for (role, role_config) in config.items(): + if role_config is None: + role_config = {} + o = apply_overrides(ctx, role_config) + + log.debug('install.upgrade config ' + str(role_config) + + ' and with overrides ' + str(o)) + # for install.upgrade overrides are actually defaults + configs.append((o.get('flavor', 'default'), + role_config.get('tag', o.get('tag', '')), + role_config.get('branch', o.get('branch', '')), + role_config.get('sha1', o.get('sha1')))) + return configs + +GET_CONFIG_FUNCTIONS = { + 'install': get_config_install, + 'install.upgrade': get_config_install_upgrade, +} + +def lookup_configs(ctx, node): + configs = [] + if type(node) is types.ListType: + for leaf in node: + configs.extend(lookup_configs(ctx, leaf)) + elif type(node) is 
types.DictType: + for (key, value) in node.items(): + if key in ('install', 'install.upgrade'): + configs.extend(GET_CONFIG_FUNCTIONS[key](ctx, value)) + elif key in ('overrides',): + pass + else: + configs.extend(lookup_configs(ctx, value)) + return configs + +def get_sha1(ref): + url = teuth_config.get_ceph_git_url() + ls_remote = misc.sh("git ls-remote " + url + " " + ref) + return ls_remote.split()[0] + +def task(ctx, config): + """ + Build Ceph packages. This task will automagically be run + before the task that need to install packages (this is taken + care of by the internal teuthology task). + + The config should be as follows: + + buildpackages: + good_machine: + disk: 40 # GB + ram: 48000 # MB + cpus: 16 + min_machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + + example: + + tasks: + - buildpackages: + good_machine: + disk: 40 # GB + ram: 15000 # MB + cpus: 16 + min_machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + - install: + + When a buildpackages task is already included, the values it contains can be + overriden with: + + overrides: + buildpackages: + good_machine: + disk: 20 # GB + ram: 2000 # MB + cpus: 2 + min_machine: + disk: 10 # GB + ram: 1000 # MB + cpus: 1 + + """ + log.info('Beginning buildpackages...') + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for config not ' + str(config) + overrides = ctx.config.get('overrides', {}) + misc.deep_merge(config, overrides.get('buildpackages', {})) + d = os.path.join(os.path.dirname(__file__), 'buildpackages') + os_type = misc.get_distro(ctx) + os_version = misc.get_distro_version(ctx) + arch = ctx.config.get('arch', OpenStack().get_default_arch()) + dist = LocalGitbuilderProject()._get_distro(distro=os_type, + version=os_version) + pkg_type = get_pkg_type(os_type) + misc.sh( + "flock --close /tmp/buildpackages " + + "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent") + for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config): + if tag: + sha1 = get_sha1(tag) + elif branch: + sha1 = get_sha1(branch) + log.info("building flavor = " + flavor + "," + + " tag = " + tag + "," + + " branch = " + branch + "," + + " sha1 = " + sha1) + self_name = 'teuthology' + key_name = 'teuthology' + pkg_repo = 'packages-repository' + security_group = 'teuthology' + if teuth_config.openstack.has_key('selfname'): + self_name = teuth_config.openstack['selfname'] + if teuth_config.openstack.has_key('keypair'): + key_name = teuth_config.openstack['keypair'] + if teuth_config.openstack.has_key('package_repo'): + pkg_repo = teuth_config.openstack['package_repo'] + if teuth_config.openstack.has_key('server_group'): + security_group = teuth_config.openstack['server_group'] + target = (self_name + '-ceph-' + + pkg_type + '-' + + dist + '-' + + arch + '-' + + flavor + '-' + + sha1) + openstack = OpenStack() + openstack.set_provider() + network = openstack.net() + if network != "": + network = " OPENSTACK_NETWORK='" + network + "' " + openstack.image(os_type, os_version, arch) # create if it does not exist + build_flavor = openstack.flavor_range( + config['min_machine'], config['good_machine'], arch) + default_arch = openstack.get_default_arch() + http_flavor = openstack.flavor({ + 'disk': 30, # GB + 'ram': 1024, # MB + 'cpus': 1, + }, default_arch) + + lock = "/tmp/buildpackages-" + sha1 + "-" + os_type + "-" + os_version + cmd = (". 
" + os.environ['HOME'] + "/.ssh_agent ; " + + " flock --close " + lock + + " make -C " + d + + network + + " SELFNAME=" + self_name + + " KEY_NAME=" + key_name + + " PKG_REPO=" + pkg_repo + + " SEC_GROUP=" + security_group + + " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() + + " CEPH_PKG_TYPE=" + pkg_type + + " CEPH_OS_TYPE=" + os_type + + " CEPH_OS_VERSION=" + os_version + + " CEPH_DIST=" + dist + + " CEPH_ARCH=" + arch + + " CEPH_SHA1=" + sha1 + + " CEPH_TAG=" + tag + + " CEPH_BRANCH=" + branch + + " CEPH_FLAVOR=" + flavor + + " BUILD_FLAVOR=" + build_flavor + + " HTTP_FLAVOR=" + http_flavor + + " HTTP_ARCH=" + default_arch + + " BUILDPACKAGES_CANONICAL_TAGS=" + + ("true" if teuth_config.canonical_tags else "false") + + " " + target + + " ") + log.info("Executing the following make command to build {} packages. " \ + "Note that some values in the command, like CEPH_GIT_URL " \ + "and BUILDPACKAGES_CANONICAL_TAGS, may differ from similar " \ + "command-line parameter values. This is because " \ + "the values used by this task are taken from the teuthology " \ + "configuration file. If in doubt, tear down your teuthology " \ + "instance and start again from scratch.".format(pkg_type)) + log.info("buildpackages make command: " + cmd) + misc.sh(cmd) + teuth_config.gitbuilder_host = openstack.get_ip(pkg_repo, '') + log.info('Finished buildpackages') diff --git a/teuthology/task/buildpackages/Makefile b/teuthology/task/buildpackages/Makefile new file mode 100644 index 0000000000..9de81db4be --- /dev/null +++ b/teuthology/task/buildpackages/Makefile @@ -0,0 +1,84 @@ +SHELL=/bin/bash +D=/tmp/stampsdir +VPATH=${D} +TIMEOUT_SERVER_CREATE = 30m +TIMEOUT_BUILD = 220m # 20 minutes short of 4 hours +SEC_GROUP=teuthology +KEY_NAME=teuthology +SELFNAME=teuthology +PKG_REPO=packages-repository +PKG_REPO_OS_TYPE=ubuntu +PKG_REPO_OS_VERSION=14.04 +PKG_REPO_USER_DATA=${PKG_REPO_OS_TYPE}-${PKG_REPO_OS_VERSION}-user-data.txt + +# We want to extract the first listed IPv4 address! +# Openstack will provide the addresses field in this format: +# "net1-name=ip(, ip)+(; net2-name=ip(, ip)+)+" +# Each IP may be v4 or v6 (including shortened forms and IPv4-mapped-IPv6 forms) +# 1.2.3.4 +# 2001:db8:6050:ed4d:f816:3eff:fe48:3b36 +# 2001:db8::fe48:3b36 +# 2001:db8::1.2.3.4 +# Example long-form input: +# private-network=10.10.10.69, 2001:db8:6050:ed4d:f816:3eff:fed1:d9f8;net-name2=2001:db8::fe48:3b36, 2001:db8::1.2.3.4, 1.2.3.4; +# TODO: allow selection of the network instead of taking the first network +# TODO: Support IPv6 in future +define get_ip +$$(openstack server show -f value -c addresses $(1) |perl -pe 's/^[^=]+=([^;]+).*/\1/g; s/[ ,]/\n/g; ' |grep -v -e ':' -e '^$$' |head -n1) +endef + +MY_IP=$(shell hostname -I | cut -f1 -d' ') + +${HOME}/.ssh_agent: + ssh-agent -s > ${HOME}/.ssh_agent + source ${HOME}/.ssh_agent ; ssh-add ; ssh-add -l + grep -q ssh_agent ~/.bashrc_teuthology || echo 'source ${HOME}/.ssh_agent' >> ~/.bashrc_teuthology + +flock-${PKG_REPO}: + timeout $(TIMEOUT_SERVER_CREATE) openstack server create --image 'teuthology-ubuntu-14.04-${HTTP_ARCH}' ${OPENSTACK_NETWORK} --flavor ${HTTP_FLAVOR} --key-name ${KEY_NAME} --security-group ${SEC_GROUP} --property ownedby=${MY_IP} --user-data ${PKG_REPO_USER_DATA} --wait ${PKG_REPO} + sleep 30 + set -ex ; \ + ip=$(call get_ip,${PKG_REPO}) ; \ + for delay in 60 60 60 60 2 2 2; do sleep $$delay ; if ssh -o 'ConnectTimeout=3' $$ip bash -c '"grep -q READYTORUN /var/log/cloud-init*.log"' ; then break ; else echo "ssh status code $$?" 
; fi ; done ; \ + ssh $$ip sudo apt-get update ; \ + ssh $$ip sudo apt-get install -y nginx rsync && \ + ssh $$ip sudo chown -R ubuntu /usr/share/nginx/html && \ + ssh $$ip sudo rm /usr/share/nginx/html/\* && \ + ssh $$ip sudo perl -pi -e '"s|location / {|location / { autoindex on;|"' /etc/nginx/sites-available/default && \ + ssh $$ip sudo /etc/init.d/nginx restart && \ + perl -pi -e "s/^gitbuilder_host:.*/gitbuilder_host: $$ip/" ~/.teuthology.yaml + touch ${D}/$@ + +${PKG_REPO}: + mkdir -p ${D} + flock --close ${D}/flock-$@.lock ${MAKE} flock-$@ + touch ${D}/$@ + +# Just because 'server create' return success does not mean it actually succeeded! +# Check the server status before we proceed. +# If it's a weird status, bail out and let the delete fire +# eg: ERROR status can happen if there is no VM host without enough capacity for the request. +${SELFNAME}-ceph-${CEPH_PKG_TYPE}-${CEPH_DIST}-${CEPH_ARCH}-${CEPH_FLAVOR}-${CEPH_SHA1}: ${PKG_REPO} + timeout $(TIMEOUT_SERVER_CREATE) openstack server create --image 'makecheck-${CEPH_OS_TYPE}-${CEPH_OS_VERSION}-${CEPH_ARCH}' ${OPENSTACK_NETWORK} --flavor ${BUILD_FLAVOR} --key-name ${KEY_NAME} --security-group ${SEC_GROUP} --property ownedby=${MY_IP} --user-data ${CEPH_OS_TYPE}-${CEPH_OS_VERSION}-user-data.txt --wait $@ + set -ex ; \ + trap "openstack server delete --wait $@" EXIT ; \ + for delay in 30 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 ; do \ + status=$$(openstack server show -c status -f value $@) ; \ + case $$status in \ + ACTIVE) break ;; \ + NOSTATE|*BUILD|*BOOT|*RESIZE) sleep $$delay ;; \ + *) exit 1 ;; \ + esac ; \ + done ; \ + ip=$(call get_ip,$@) ; \ + test -n "$$ip" || exit ; \ + for delay in 60 60 60 60 2 2 2 ; do sleep $$delay ; if ssh -o 'ConnectTimeout=3' $$ip bash -c '"grep -q READYTORUN /var/log/cloud-init*.log"' ; then break ; else echo "ssh status code $$?" 
; fi ; done ; \ + scp make-${CEPH_PKG_TYPE}.sh common.sh ubuntu@$$ip: ; \ + packages_repository=$(call get_ip,${> /etc/ssh/sshd_config + - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo +preserve_hostname: true +system_info: + default_user: + name: ubuntu +packages: + - dracut-modules-growroot +runcmd: + - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64 + - reboot +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/centos-7.0-user-data.txt b/teuthology/task/buildpackages/centos-7.0-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/centos-7.0-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/centos-7.1-user-data.txt b/teuthology/task/buildpackages/centos-7.1-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/centos-7.1-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/centos-7.2-user-data.txt b/teuthology/task/buildpackages/centos-7.2-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/centos-7.2-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/centos-7.3-user-data.txt b/teuthology/task/buildpackages/centos-7.3-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/centos-7.3-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/common.sh b/teuthology/task/buildpackages/common.sh new file mode 100644 index 0000000000..4bc18adc37 --- /dev/null +++ b/teuthology/task/buildpackages/common.sh @@ -0,0 +1,169 @@ +#!/bin/bash +# +# Copyright (C) 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +function install_deps() { + if [ ! 
-f install-deps.sh ]; then + git archive --remote=git://git.ceph.com/ceph.git main install-deps.sh | tar -xvf - + fi + # + # drop the following hack when trusty is not supported anymore + # there is no other way as long as we maintain a debian directory that tries + # to be the same for all distributions + # + if grep --quiet 14.04 /etc/issue 2>/dev/null && sudo apt-get install --force-yes -qq -y dpkg-dev && test "$(dpkg-architecture -qDEB_BUILD_GNU_CPU 2>/dev/null)" = aarch64 ; then + sed -i -e '/libgoogle-perftools-dev/d' debian/control + fi + bash -x install-deps.sh +} + +function git_submodules() { + # see http://tracker.ceph.com/issues/13426 + perl -pi -e 's|git://ceph.com/git/ceph-object-corpus.git|https://github.com/ceph/ceph-object-corpus.git|' .gitmodules + local force=$(if git submodule usage 2>&1 | grep --quiet 'update.*--force'; then echo --force ; fi) + git submodule sync || return 1 + git submodule update $force --init --recursive || return 1 +} + +function get_ceph() { + local git_ceph_url=$1 + local sha1=$2 + + test -d ceph || git clone ${git_ceph_url} ceph + cd ceph + if test -d src ; then # so we don't try to fetch when using a fixture + if test "x$BUILDPACKAGES_CANONICAL_TAGS" != "xfalse" ; then + echo "Fetching canonical tags from http://github.com/ceph/ceph (to disable, " \ + "set BUILDPACKAGES_CANONICAL_TAGS=false in the environment)" + git fetch --tags http://github.com/ceph/ceph + fi + fi + git fetch --tags ${git_ceph_url} + git checkout ${sha1} +} + +function init_ceph() { + local git_ceph_url=$1 + local sha1=$2 + get_ceph $git_ceph_url $sha1 || return 1 + git_submodules || return 1 + install_deps || return 1 +} + +function flavor2configure() { + local flavor=$1 + + eval $(dpkg-architecture) + + if test $flavor = notcmalloc || test "$DEB_HOST_GNU_CPU" = aarch64 ; then + echo --without-tcmalloc --without-cryptopp + fi +} + +# +# for a given $sha1 in the $ceph_dir repository, lookup all references +# from the remote origin and tags matching the sha1. Add a symbolic +# link in $ref_dir to the $sha1 for each reference found. If the +# reference is a tag, also add a symbolic link to the commit to which +# the tag points, if it is an annotated tag. 
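A hedged illustration of link_same (paths, ref names, and sha1s are made up): if refs/tags/v9.0.0 and refs/remotes/origin/wip-foo both point at commit abc123, then

    link_same /srv/refs /srv/ceph abc123

leaves /srv/refs/v9.0.0 and /srv/refs/wip-foo as symlinks to ../sha1/abc123; for an annotated tag, a link is also created for the commit the tag object points to, as test_link_same exercises.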
+# +function link_same() { + local ref_dir=$1 + local ceph_dir=$2 + local sha1=$3 + + mkdir -p $ref_dir + ( + cd ${ceph_dir} + git for-each-ref refs/tags/** refs/remotes/origin/** | grep $sha1 | \ + while read sha1 type ref ; do + if test $type = 'tag' ; then + commit_sha1=$(git rev-parse $ref^{commit}) + if test $commit_sha1 != $sha1 ; then + echo ../sha1/$sha1 ../sha1/$commit_sha1 + fi + fi + echo ../sha1/$sha1 $(basename $ref) + done + ) | while read from to ; do + ( cd $ref_dir ; ln -sf $from $to ) + done +} + +function test_link_same() { + local d=/tmp/link_same$$ + mkdir -p $d/primary + cd $d/primary + git init + touch a ; git add a ; git commit -m 'm' a + git tag tag1 + tag1=$(git rev-parse HEAD) + git branch branch1 + touch b ; git add b ; git commit -m 'm' b + git tag --annotate -m 'a' tag2 + tag2=$(git rev-parse tag2) + sha1_tag2=$(git rev-parse tag2^{commit}) + git branch branch2 + touch c ; git add c ; git commit -m 'm' c + git branch branch3 + sha1_branch3=$(git rev-parse branch3) + + git clone $d/primary $d/secondary + cd $d/secondary + mkdir $d/ref $d/sha1 + + touch $d/sha1/$sha1_branch3 + link_same $d/ref $d/secondary $sha1_branch3 + test $(readlink --canonicalize $d/ref/branch3) = $d/sha1/$sha1_branch3 || return 1 + test $(readlink --canonicalize $d/ref/main) = $d/sha1/$sha1_branch3 || return 1 + + touch $d/sha1/$tag2 + link_same $d/ref $d/secondary $tag2 + test $(readlink --canonicalize $d/ref/tag2) = $d/sha1/$tag2 || return 1 + test $(readlink --canonicalize $d/sha1/$sha1_tag2) = $d/sha1/$tag2 || return 1 + + touch $d/sha1/$tag1 + link_same $d/ref $d/secondary $tag1 + test $(readlink --canonicalize $d/ref/tag1) = $d/sha1/$tag1 || return 1 + test $(readlink --canonicalize $d/ref/branch1) = $d/sha1/$tag1 || return 1 + + rm -fr $d +} + +function maybe_parallel() { + local nproc=$1 + local vers=$2 + + if echo $vers | grep --quiet '0\.67' ; then + return + fi + + if test $nproc -gt 1 ; then + echo -j${nproc} + fi +} + +function test_maybe_parallel() { + test "$(maybe_parallel 1 0.72)" = "" || return 1 + test "$(maybe_parallel 8 0.67)" = "" || return 1 + test "$(maybe_parallel 8 0.72)" = "-j8" || return 1 +} + +if test "$1" = "TEST" ; then + shopt -s -o xtrace + PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' + test_link_same + test_maybe_parallel +fi diff --git a/teuthology/task/buildpackages/debian-8.0-user-data.txt b/teuthology/task/buildpackages/debian-8.0-user-data.txt new file mode 100644 index 0000000000..13aba98763 --- /dev/null +++ b/teuthology/task/buildpackages/debian-8.0-user-data.txt @@ -0,0 +1,12 @@ +#cloud-config +bootcmd: + - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +system_info: + default_user: + name: ubuntu +runcmd: + - echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/make-deb.sh b/teuthology/task/buildpackages/make-deb.sh new file mode 100755 index 0000000000..fb7f4176d9 --- /dev/null +++ b/teuthology/task/buildpackages/make-deb.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Copyright (C) 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +# +# Create and upload a deb repository with the same naming conventions +# as https://github.com/ceph/autobuild-ceph/blob/main/build-ceph-deb.sh +# +set -xe + +base=/tmp/release +gitbuilder_host=$1 +codename=$2 +git_ceph_url=$3 +sha1=$4 +flavor=$5 +arch=$6 +canonical_tags=$7 + +sudo apt-get update +sudo apt-get install -y git + +export BUILDPACKAGES_CANONICAL_TAGS=$canonical_tags +source $(dirname $0)/common.sh + +init_ceph $git_ceph_url $sha1 + +#codename=$(lsb_release -sc) +releasedir=$base/$(lsb_release -si)/WORKDIR +# +# git describe provides a version that is +# a) human readable +# b) is unique for each commit +# c) compares higher than any previous commit +# d) contains the short hash of the commit +# +vers=$(git describe --match "v*" | sed s/^v//) +# +# always set the debian version to 1 which is ok because the debian +# directory is included in the sources and the upstream version will +# change each time it is modified. +# +dvers="$vers-1" +: ${NPROC:=$(nproc)} +ceph_dir=$(pwd) + +function build_package() { + + rm -fr $releasedir + mkdir -p $releasedir + # + # remove all files not under git so they are not + # included in the distribution. + # + git clean -qdxff + + fileext="gz" + # autotools only works in jewel and below + if [[ ! -e "make-dist" ]] ; then + # + # creating the distribution tarbal requires some configure + # options (otherwise parts of the source tree will be left out). + # + ./autogen.sh + # Building with LTTNG on Ubuntu Precise is not possible. + # It fails the LTTNG-is-sane check (it misses headers) + # And the Debian rules files leave it out anyway + case $codename in + precise) lttng_opt="--without-lttng" ;; + *) lttng_opt="--with-lttng" ;; + esac + ./configure $(flavor2configure $flavor) \ + --with-rocksdb --with-ocf \ + --with-nss --with-debug --enable-cephfs-java \ + $lttng_opt --with-babeltrace + # + # use distdir= to set the name of the top level directory of the + # tarbal to match the desired version + # + make distdir=ceph-$vers dist + else + ./make-dist + fileext="bz2" + fi + # + # rename the tarbal to match debian conventions and extract it + # + mv ceph-$vers.tar.$fileext $releasedir/ceph_$vers.orig.tar.$fileext + tar -C $releasedir -xf $releasedir/ceph_$vers.orig.tar.$fileext + # + # copy the debian directory over + # + cp -a debian $releasedir/ceph-$vers/debian + cd $releasedir + # + # uncomment to remove -dbg packages + # because they are large and take time to build + # + #perl -ni -e 'print if(!(/^Package: .*-dbg$/../^$/))' ceph-$vers/debian/control + #perl -pi -e 's/--dbg-package.*//' ceph-$vers/debian/rules + # + # update the changelog to match the desired version + # + cd ceph-$vers + local chvers=$(head -1 debian/changelog | perl -ne 's/.*\(//; s/\).*//; print') + if [ "$chvers" != "$dvers" ]; then + DEBEMAIL="contact@ceph.com" dch -D $codename --force-distribution -b -v "$dvers" "new version" + fi + # + # create the packages (with ccache) + # + export CEPH_EXTRA_CONFIGURE_ARGS=$(flavor2configure $flavor) + j=$(maybe_parallel $NPROC $vers) + PATH=/usr/lib/ccache:$PATH dpkg-buildpackage $j -uc -us -sa +} + +function build_repo() { + local gitbuilder_host=$1 + + sudo apt-get install -y reprepro + cd ${releasedir}/.. 
+ # + # Create a repository in a directory with a name structured + # as + # + base=ceph-deb-$codename-$arch-$flavor + sha1_dir=$codename/$base/sha1/$sha1 + mkdir -p $sha1_dir/conf + cat > $sha1_dir/conf/distributions < $sha1_dir/version + echo $sha1 > $sha1_dir/sha1 + link_same $codename/$base/ref $ceph_dir $sha1 + if test "$gitbuilder_host" ; then + cd $codename + sudo apt-get install -y rsync + RSYNC_RSH='ssh -o StrictHostKeyChecking=false' rsync -av $base/ $gitbuilder_host:/usr/share/nginx/html/$base/ + fi +} + +build_package +build_repo $gitbuilder_host diff --git a/teuthology/task/buildpackages/make-rpm.sh b/teuthology/task/buildpackages/make-rpm.sh new file mode 100755 index 0000000000..11cac70000 --- /dev/null +++ b/teuthology/task/buildpackages/make-rpm.sh @@ -0,0 +1,294 @@ +#!/bin/bash +# +# Copyright (C) 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +# +# Create and upload a RPM repository with the same naming conventions +# as https://github.com/ceph/autobuild-ceph/blob/main/build-ceph-rpm.sh +# + +set -xe + +base=/tmp/release +gitbuilder_host=$1 +codename=$2 +git_ceph_url=$3 +sha1=$4 +flavor=$5 +arch=$6 +canonical_tags=$7 + +suse=false +[[ $codename =~ suse ]] && suse=true +[[ $codename =~ sle ]] && suse=true + +CREATEREPO=createrepo +if [ "$suse" = true ] ; then + source /etc/os-release + majorvers=$(echo $VERSION_ID | cut -d \. -f 1-1) + test $majorvers -ge 15 && CREATEREPO=createrepo_c + for delay in 60 60 60 60 ; do + sudo zypper --non-interactive --no-gpg-checks refresh && break + sleep $delay + done + sudo zypper --non-interactive install --no-recommends git $CREATEREPO +else + sudo yum install -y git $CREATEREPO +fi + +export BUILDPACKAGES_CANONICAL_TAGS=$canonical_tags +source $(dirname $0)/common.sh + +init_ceph $git_ceph_url $sha1 + +distro=$( source /etc/os-release ; echo $ID ) +distro_version=$( source /etc/os-release ; echo $VERSION ) +releasedir=$base/$distro/WORKDIR +# +# git describe provides a version that is +# a) human readable +# b) is unique for each commit +# c) compares higher than any previous commit +# WAIT, c) DOES NOT HOLD: +# >>> print 'v10.2.5-7-g000000' < 'v10.2.5-8-g000000' +# True +# >>> print 'v10.2.5-9-g000000' < 'v10.2.5-10-g000000' +# False +# d) contains the short hash of the commit +# +# Regardless, we use it for the RPM version number, but strip the leading 'v' +# and replace the '-' before the 'g000000' with a '.' to match the output of +# "rpm -q $PKG --qf %{VERSION}-%{RELEASE}" +# +vers=$(git describe --match "v*" | sed -r -e 's/^v//' -e 's/\-([[:digit:]]+)\-g/\-\1\.g/') +ceph_dir=$(pwd) + +# +# Create a repository in a directory with a name structured +# as +# +base=ceph-rpm-$codename-$arch-$flavor + +function setup_rpmmacros() { + if ! grep -q find_debuginfo_dwz_opts $HOME/.rpmmacros ; then + echo '%_find_debuginfo_dwz_opts %{nil}' >> $HOME/.rpmmacros + fi + if [ "x${distro}x" = "xcentosx" ] && echo $distro_version | grep -q '7' ; then + if ! 
grep -q '%dist .el7' $HOME/.rpmmacros ; then + echo '%dist .el7' >> $HOME/.rpmmacros + fi + fi +} + +function build_package() { + rm -fr $releasedir + mkdir -p $releasedir + # + # remove all files not under git so they are not + # included in the distribution. + # + git clean -qdxff + # autotools only works in jewel and below + if [[ ! -e "make-dist" ]] ; then + # lsb-release is required by install-deps.sh + # which is required by autogen.sh + if [ "$suse" = true ] ; then + sudo zypper -n install bzip2 lsb-release which + else + sudo yum install -y bzip2 redhat-lsb-core which + fi + ./autogen.sh + # + # creating the distribution tarball requires some configure + # options (otherwise parts of the source tree will be left out). + # + ./configure $(flavor2configure $flavor) --with-debug --with-radosgw --with-fuse --with-libatomic-ops --with-gtk2 --with-nss + + # + # use distdir= to set the name of the top level directory of the + # tarbal to match the desired version + # + make dist-bzip2 + else + # kraken and above + ./make-dist + fi + # Set up build area + setup_rpmmacros + if [ "$suse" = true ] ; then + sudo zypper -n install rpm-build + else + sudo yum install -y rpm-build + fi + local buildarea=$releasedir + mkdir -p ${buildarea}/SOURCES + mkdir -p ${buildarea}/SRPMS + mkdir -p ${buildarea}/SPECS + cp ceph.spec ${buildarea}/SPECS + mkdir -p ${buildarea}/RPMS + mkdir -p ${buildarea}/BUILD + CEPH_TARBALL=( ceph-*.tar.bz2 ) + CEPH_TARBALL_BASE=$(echo $CEPH_TARBALL | sed -e 's/.tar.bz2$//') + CEPH_VERSION=$(echo $CEPH_TARBALL_BASE | cut -d - -f 2-2) + CEPH_RELEASE=$(echo $CEPH_TARBALL_BASE | cut -d - -f 3- | tr - .) + cp -a $CEPH_TARBALL ${buildarea}/SOURCES/. + cp -a rpm/*.patch ${buildarea}/SOURCES || true + ( + cd ${buildarea}/SPECS + ccache=$(echo /usr/lib*/ccache) + if [ "$suse" = true ]; then + sed -i \ + -e '0,/%package/s//%debug_package\n\n&/' \ + -e 's/%bcond_with ceph_test_package/%bcond_without ceph_test_package/g' \ + -e "s/^Version:.*/Version: $CEPH_VERSION/g" \ + -e "s/^Release:.*/Release: $CEPH_RELEASE/g" \ + -e "s/^Source0:.*/Source0: $CEPH_TARBALL/g" \ + -e '/^Source9/d' \ + -e "s/^%autosetup -p1.*/%autosetup -p1 -n $CEPH_TARBALL_BASE/g" \ + ceph.spec + fi + cat ceph.spec + buildarea=`readlink -fn ${releasedir}` ### rpm wants absolute path + PATH=$ccache:$PATH rpmbuild -ba --nosignature \ + --define '_srcdefattr (-,root,root)' \ + --define "_unpackaged_files_terminate_build 0" \ + --define "_topdir ${buildarea}" \ + ceph.spec + ) +} + +function build_rpm_release() { + local buildarea=$1 + local sha1=$2 + local gitbuilder_host=$3 + local base=$4 + + cat < ${buildarea}/SPECS/ceph-release.spec +Name: ceph-release +Version: 1 +Release: 0%{?dist} +Summary: Ceph repository configuration +Group: System Environment/Base +License: GPLv2 +URL: http://gitbuilder.ceph.com/$dist +Source0: ceph.repo +#Source0: RPM-GPG-KEY-CEPH +#Source1: ceph.repo +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildArch: noarch + +%description +This package contains the Ceph repository GPG key as well as configuration +for yum and up2date. + +%prep + +%setup -q -c -T +install -pm 644 %{SOURCE0} . +#install -pm 644 %{SOURCE1} . 
+ +%build + +%install +rm -rf %{buildroot} +#install -Dpm 644 %{SOURCE0} \ +# %{buildroot}/%{_sysconfdir}/pki/rpm-gpg/RPM-GPG-KEY-CEPH +install -dm 755 %{buildroot}/%{_sysconfdir}/yum.repos.d +install -pm 644 %{SOURCE0} \ + %{buildroot}/%{_sysconfdir}/yum.repos.d + +%clean +#rm -rf %{buildroot} + +%post + +%postun + +%files +%defattr(-,root,root,-) +#%doc GPL +/etc/yum.repos.d/* +#/etc/pki/rpm-gpg/* + +%changelog +* Tue Mar 12 2013 Gary Lowell - 1-0 +- Handle both yum and zypper +- Use URL to ceph git repo for key +- remove config attribute from repo file +* Tue Aug 28 2012 Gary Lowell - 1-0 +- Initial Package +EOF + + cat < $buildarea/SOURCES/ceph.repo +[Ceph] +name=Ceph packages for \$basearch +baseurl=http://${gitbuilder_host}/${base}/sha1/${sha1}/\$basearch +enabled=1 +gpgcheck=0 +type=rpm-md + +[Ceph-noarch] +name=Ceph noarch packages +baseurl=http://${gitbuilder_host}/${base}/sha1/${sha1}/noarch +enabled=1 +gpgcheck=0 +type=rpm-md + +[ceph-source] +name=Ceph source packages +baseurl=http://${gitbuilder_host}/${base}/sha1/${sha1}/SRPMS +enabled=1 +gpgcheck=0 +type=rpm-md +EOF + + rpmbuild -bb --define "_topdir ${buildarea}" ${buildarea}/SPECS/ceph-release.spec +} + +function build_rpm_repo() { + local buildarea=$1 + local gitbuilder_host=$2 + local base=$3 + + for dir in ${buildarea}/SRPMS ${buildarea}/RPMS/* + do + $CREATEREPO ${dir} + done + + local sha1_dir=${buildarea}/../$codename/$base/sha1/$sha1 + mkdir -p $sha1_dir + echo $vers > $sha1_dir/version + echo $sha1 > $sha1_dir/sha1 + echo ceph > $sha1_dir/name + + for dir in ${buildarea}/SRPMS ${buildarea}/RPMS/* + do + cp -fla ${dir} $sha1_dir + done + + link_same ${buildarea}/../$codename/$base/ref $ceph_dir $sha1 + if test "$gitbuilder_host" ; then + ( + cd ${buildarea}/../$codename + RSYNC_RSH='ssh -o StrictHostKeyChecking=false' rsync -av $base/ ubuntu@$gitbuilder_host:/usr/share/nginx/html/$base/ + ) + fi +} + +setup_rpmmacros +build_package +build_rpm_release $releasedir $sha1 $gitbuilder_host $base +build_rpm_repo $releasedir $gitbuilder_host $base diff --git a/teuthology/task/buildpackages/opensuse-15.0-user-data.txt b/teuthology/task/buildpackages/opensuse-15.0-user-data.txt new file mode 100644 index 0000000000..8b9e2244c4 --- /dev/null +++ b/teuthology/task/buildpackages/opensuse-15.0-user-data.txt @@ -0,0 +1,16 @@ +#cloud-config +bootcmd: + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +users: + - name: ubuntu + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks rm gettext-runtime-mini grub2 grub2-branding-openSUSE grub2-i386-pc grub2-snapper-plugin grub2-systemd-sleep-plugin + - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release make gcc gcc-c++ grub2 rpm-build + - sleep 30 +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/opensuse-42.1-user-data.txt b/teuthology/task/buildpackages/opensuse-42.1-user-data.txt new file mode 100644 index 0000000000..190cac2b1e --- /dev/null +++ b/teuthology/task/buildpackages/opensuse-42.1-user-data.txt @@ -0,0 +1,13 @@ +#cloud-config +bootcmd: + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver 
+manage_etc_hosts: true +preserve_hostname: true +users: + - name: ubuntu + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh ) +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/opensuse-42.2-user-data.txt b/teuthology/task/buildpackages/opensuse-42.2-user-data.txt new file mode 100644 index 0000000000..fd35c9db02 --- /dev/null +++ b/teuthology/task/buildpackages/opensuse-42.2-user-data.txt @@ -0,0 +1,14 @@ +#cloud-config +bootcmd: + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +users: + - name: ubuntu + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh ) + - 'zypper rr openSUSE-Leap-Cloud-Tools || :' +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/opensuse-42.3-user-data.txt b/teuthology/task/buildpackages/opensuse-42.3-user-data.txt new file mode 120000 index 0000000000..1aa71c4069 --- /dev/null +++ b/teuthology/task/buildpackages/opensuse-42.3-user-data.txt @@ -0,0 +1 @@ +opensuse-42.2-user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/sle-12.1-user-data.txt b/teuthology/task/buildpackages/sle-12.1-user-data.txt new file mode 100644 index 0000000000..b3edb878a0 --- /dev/null +++ b/teuthology/task/buildpackages/sle-12.1-user-data.txt @@ -0,0 +1,14 @@ +#cloud-config +bootcmd: + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +users: + - name: ubuntu + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh ) + - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/sle-12.2-user-data.txt b/teuthology/task/buildpackages/sle-12.2-user-data.txt new file mode 120000 index 0000000000..d3697ebdf7 --- /dev/null +++ b/teuthology/task/buildpackages/sle-12.2-user-data.txt @@ -0,0 +1 @@ +sle-12.1-user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/sle-12.3-user-data.txt b/teuthology/task/buildpackages/sle-12.3-user-data.txt new file mode 120000 index 0000000000..d3697ebdf7 --- /dev/null +++ b/teuthology/task/buildpackages/sle-12.3-user-data.txt @@ -0,0 +1 @@ +sle-12.1-user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/sle-15.0-user-data.txt b/teuthology/task/buildpackages/sle-15.0-user-data.txt new file mode 100644 index 0000000000..b837125c8f --- /dev/null +++ b/teuthology/task/buildpackages/sle-15.0-user-data.txt @@ -0,0 +1,14 @@ +#cloud-config +bootcmd: + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +users: + - name: ubuntu + gecos: User + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: users +runcmd: + - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp 
/root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh ) + - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release +final_message: "READYTORUN" diff --git a/teuthology/task/buildpackages/ubuntu-12.04-user-data.txt b/teuthology/task/buildpackages/ubuntu-12.04-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/ubuntu-12.04-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/ubuntu-14.04-user-data.txt b/teuthology/task/buildpackages/ubuntu-14.04-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/ubuntu-14.04-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/ubuntu-16.04-user-data.txt b/teuthology/task/buildpackages/ubuntu-16.04-user-data.txt new file mode 120000 index 0000000000..2eb0e3c88d --- /dev/null +++ b/teuthology/task/buildpackages/ubuntu-16.04-user-data.txt @@ -0,0 +1 @@ +user-data.txt \ No newline at end of file diff --git a/teuthology/task/buildpackages/user-data.txt b/teuthology/task/buildpackages/user-data.txt new file mode 100644 index 0000000000..d5016929da --- /dev/null +++ b/teuthology/task/buildpackages/user-data.txt @@ -0,0 +1,10 @@ +#cloud-config +bootcmd: + - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs + - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver +manage_etc_hosts: true +preserve_hostname: true +system_info: + default_user: + name: ubuntu +final_message: "READYTORUN" diff --git a/teuthology/task/ceph_ansible.py b/teuthology/task/ceph_ansible.py new file mode 100644 index 0000000000..0e7d483c30 --- /dev/null +++ b/teuthology/task/ceph_ansible.py @@ -0,0 +1,500 @@ +import json +import os +import re +import logging +import yaml + +from teuthology.task import Task +from tempfile import NamedTemporaryFile +from teuthology.config import config as teuth_config +from teuthology.misc import get_scratch_devices, get_file +from teuthology import contextutil +from teuthology.orchestra import run +from teuthology import misc +log = logging.getLogger(__name__) + + +class CephAnsible(Task): + name = 'ceph_ansible' + + __doc__ = """ + A task to setup ceph cluster using ceph-ansible + + - ceph-ansible: + repo: {git_base}ceph-ansible.git + branch: mybranch # defaults to main + ansible-version: 2.4 # defaults to 2.5 + vars: + ceph_dev: True ( default) + ceph_conf_overrides: + global: + mon pg warn min per osd: 2 + + It always uses a dynamic inventory. + + It will optionally do the following automatically based on ``vars`` that + are passed in: + * Set ``devices`` for each host if ``osd_auto_discovery`` is not True + * Set ``monitor_interface`` for each host if ``monitor_interface`` is + unset + * Set ``public_network`` for each host if ``public_network`` is unset + + The machine that ceph-ansible runs on can be specified using the + installer.0 role. If installer.0 is not used, the first mon will be the + machine on which ceph-ansible runs. 
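As a rough sketch (hostnames, interface, and network values are invented), the generated inventory groups hosts by role and appends any auto-detected host vars, e.g.:

    [mons]
    mon0.example.com monitor_interface='eth0' public_network='172.21.0.0/20' radosgw_interface='eth0'

    [osds]
    osd0.example.com devices='["/dev/vdb"]' monitor_interface='eth0' public_network='172.21.0.0/20' radosgw_interface='eth0'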
+ """.format(git_base=teuth_config.ceph_git_base_url) + + groups_to_roles = dict( + mons='mon', + mgrs='mgr', + mdss='mds', + osds='osd', + rgws='rgw', + clients='client', + nfss='nfs', + ) + + def __init__(self, ctx, config): + super(CephAnsible, self).__init__(ctx, config) + config = self.config or dict() + self.playbook = None + if 'playbook' in config: + self.playbook = self.config['playbook'] + if 'repo' not in config: + self.config['repo'] = os.path.join(teuth_config.ceph_git_base_url, + 'ceph-ansible.git') + # default vars to dev builds + if 'vars' not in config: + vars = dict() + config['vars'] = vars + vars = config['vars'] + if 'ceph_dev' not in vars: + vars['ceph_dev'] = True + if 'ceph_dev_key' not in vars: + vars['ceph_dev_key'] = 'https://download.ceph.com/keys/autobuild.asc' + if 'ceph_dev_branch' not in vars: + vars['ceph_dev_branch'] = ctx.config.get('branch', 'main') + self.cluster_name = vars.get('cluster', 'ceph') + + def setup(self): + super(CephAnsible, self).setup() + # generate hosts file based on test config + self.generate_hosts_file() + # generate playbook file if it exists in config + self.playbook_file = None + if self.playbook is not None: + playbook_file = NamedTemporaryFile( + prefix="ceph_ansible_playbook_", dir='/tmp/', + delete=False, + ) + yaml.safe_dump(self.playbook, playbook_file, explicit_start=True) + playbook_file.flush() + self.playbook_file = playbook_file.name + # everything from vars in config go into group_vars/all file + extra_vars = dict() + extra_vars.update(self.config.get('vars', dict())) + gvar = yaml.dump(extra_vars, default_flow_style=False) + self.extra_vars_file = self._write_hosts_file(prefix='teuth_ansible_gvar', + content=gvar) + + def execute_playbook(self): + """ + Execute ansible-playbook + + :param _logfile: Use this file-like object instead of a LoggerFile for + testing + """ + + args = [ + 'ANSIBLE_STDOUT_CALLBACK=debug', + 'ansible-playbook', '-vv', + '-i', 'inven.yml', 'site.yml' + ] + log.debug("Running %s", args) + # If there is an installer.0 node, use that for the installer. + # Otherwise, use the first mon node as installer node. 
+ ansible_loc = self.ctx.cluster.only('installer.0') + (ceph_first_mon,) = self.ctx.cluster.only( + misc.get_first_mon(self.ctx, + self.config)).remotes.keys() + if ansible_loc.remotes: + (ceph_installer,) = ansible_loc.remotes.keys() + else: + ceph_installer = ceph_first_mon + self.ceph_first_mon = ceph_first_mon + self.ceph_installer = ceph_installer + self.args = args + if self.config.get('rhbuild'): + self.run_rh_playbook() + else: + self.run_playbook() + + def generate_hosts_file(self): + hosts_dict = dict() + for group in sorted(self.groups_to_roles.keys()): + role_prefix = self.groups_to_roles[group] + want = lambda role: role.startswith(role_prefix) + for (remote, roles) in self.cluster.only(want).remotes.items(): + hostname = remote.hostname + host_vars = self.get_host_vars(remote) + if group not in hosts_dict: + hosts_dict[group] = {hostname: host_vars} + elif hostname not in hosts_dict[group]: + hosts_dict[group][hostname] = host_vars + + hosts_content = '' + for group in sorted(hosts_dict.keys()): + hosts_content += '[%s]\n' % group + for hostname in sorted(hosts_dict[group].keys()): + vars = hosts_dict[group][hostname] + if vars: + vars_list = [] + for key in sorted(vars.keys()): + vars_list.append( + "%s='%s'" % (key, json.dumps(vars[key]).strip('"')) + ) + host_line = "{hostname} {vars}".format( + hostname=hostname, + vars=' '.join(vars_list), + ) + else: + host_line = hostname + hosts_content += '%s\n' % host_line + hosts_content += '\n' + self.inventory = self._write_hosts_file(prefix='teuth_ansible_hosts_', + content=hosts_content.strip()) + self.generated_inventory = True + + def begin(self): + super(CephAnsible, self).begin() + self.execute_playbook() + + def _write_hosts_file(self, prefix, content): + """ + Actually write the hosts file + """ + hosts_file = NamedTemporaryFile(prefix=prefix, mode='w+', + delete=False) + hosts_file.write(content) + hosts_file.flush() + return hosts_file.name + + def teardown(self): + log.info("Cleaning up temporary files") + os.remove(self.inventory) + if self.playbook is not None: + os.remove(self.playbook_file) + os.remove(self.extra_vars_file) + # collect logs + self.collect_logs() + # run purge-cluster that teardowns the cluster + args = [ + 'ANSIBLE_STDOUT_CALLBACK=debug', + 'ansible-playbook', '-vv', + '-e', 'ireallymeanit=yes', + '-i', 'inven.yml', 'purge-cluster.yml' + ] + log.debug("Running %s", args) + str_args = ' '.join(args) + installer_node = self.ceph_installer + # copy purge-cluster playbook from infra dir to top level dir + # as required by ceph-ansible + installer_node.run( + args=[ + 'cp', + run.Raw('~/ceph-ansible/infrastructure-playbooks/purge-cluster.yml'), + run.Raw('~/ceph-ansible/'), + ] + ) + if self.config.get('rhbuild'): + installer_node.run( + args=[ + run.Raw('cd ~/ceph-ansible'), + run.Raw(';'), + run.Raw(str_args) + ] + ) + else: + installer_node.run( + args=[ + run.Raw('cd ~/ceph-ansible'), + run.Raw(';'), + run.Raw('source venv/bin/activate'), + run.Raw(';'), + run.Raw(str_args) + ] + ) + # cleanup the ansible ppa repository we added + # and also remove the dependency pkgs we installed + if installer_node.os.package_type == 'deb': + installer_node.run(args=[ + 'sudo', + 'add-apt-repository', + '--remove', + run.Raw('ppa:ansible/ansible'), + ]) + installer_node.run(args=[ + 'sudo', + 'apt-get', + 'update', + ]) + installer_node.run(args=[ + 'sudo', + 'apt-get', + 'remove', + '-y', + 'ansible', + 'libssl-dev', + 'libffi-dev', + 'python-dev' + ]) + + def collect_logs(self): + ctx = self.ctx + if ctx.archive 
is not None and \ + not (ctx.config.get('archive-on-error') and ctx.summary['success']): + log.info('Archiving logs...') + path = os.path.join(ctx.archive, 'remote') + os.makedirs(path) + + def wanted(role): + # Only attempt to collect logs from hosts which are part of the + # cluster + return any(map( + lambda role_stub: role.startswith(role_stub), + self.groups_to_roles.values(), + )) + for remote in ctx.cluster.only(wanted).remotes.keys(): + sub = os.path.join(path, remote.shortname) + os.makedirs(sub) + misc.pull_directory(remote, '/var/log/ceph', + os.path.join(sub, 'log')) + + def wait_for_ceph_health(self): + with contextutil.safe_while(sleep=15, tries=6, + action='check health') as proceed: + (remote,) = self.ctx.cluster.only('mon.a').remotes + remote.run(args=[ + 'sudo', 'ceph', '--cluster', self.cluster_name, 'osd', 'tree' + ]) + remote.run(args=[ + 'sudo', 'ceph', '--cluster', self.cluster_name, '-s' + ]) + log.info("Waiting for Ceph health to reach HEALTH_OK \ + or HEALTH WARN") + while proceed(): + out = remote.sh('sudo ceph --cluster %s health' % self.cluster_name) + state = out.split(None, 1)[0] + log.info("cluster in state: %s", state) + if state in ('HEALTH_OK', 'HEALTH_WARN'): + break + + def get_host_vars(self, remote): + extra_vars = self.config.get('vars', dict()) + host_vars = dict() + if not extra_vars.get('osd_auto_discovery', False): + roles = self.ctx.cluster.remotes[remote] + dev_needed = len([role for role in roles + if role.startswith('osd')]) + if ( + teuth_config.get('ceph_ansible') and + hasattr(self.ctx, "machine_type") and + self.ctx.machine_type in teuth_config['ceph_ansible']['has_lvm_scratch_disks'] + ): + devices = get_file(remote, "/scratch_devs").decode().split() + vols = [] + + for dev in devices: + if 'vg_nvme' in dev: + splitpath = dev.split('/') + vol = dict() + vol['data_vg'] = splitpath[2] + vol['data'] = splitpath[3] + vols.append(vol) + extra_vars['lvm_volumes'] = vols + self.config.update({'vars': extra_vars}) + else: + host_vars['devices'] = get_scratch_devices(remote)[0:dev_needed] + if 'monitor_interface' not in extra_vars: + host_vars['monitor_interface'] = remote.interface + if 'radosgw_interface' not in extra_vars: + host_vars['radosgw_interface'] = remote.interface + if 'public_network' not in extra_vars: + host_vars['public_network'] = remote.cidr + return host_vars + + def run_rh_playbook(self): + ceph_installer = self.ceph_installer + args = self.args + ceph_installer.run(args=[ + 'cp', + '-R', + '/usr/share/ceph-ansible', + '.' 
+ ]) + self._copy_and_print_config() + str_args = ' '.join(args) + out = ceph_installer.sh( + [ + 'cd', + 'ceph-ansible', + run.Raw(';'), + run.Raw(str_args) + ], + timeout=4200, + check_status=False, + ) + log.info(out) + if re.search(r'all hosts have already failed', out): + log.error("Failed during ceph-ansible execution") + raise CephAnsibleError("Failed during ceph-ansible execution") + self._create_rbd_pool() + + def run_playbook(self): + # setup ansible on first mon node + ceph_installer = self.ceph_installer + args = self.args + if ceph_installer.os.package_type == 'deb': + # update ansible from ppa + ceph_installer.run(args=[ + 'sudo', + 'add-apt-repository', + run.Raw('ppa:ansible/ansible'), + ]) + ceph_installer.run(args=[ + 'sudo', + 'apt-get', + 'update', + ]) + ceph_installer.run(args=[ + 'sudo', + 'apt-get', + 'install', + '-y', + 'ansible', + 'libssl-dev', + 'python-openssl', + 'libffi-dev', + 'python-dev' + ]) + ansible_repo = self.config['repo'] + branch = 'main' + if self.config.get('branch'): + branch = self.config.get('branch') + ansible_ver = 'ansible==2.5' + if self.config.get('ansible-version'): + ansible_ver = 'ansible==' + self.config.get('ansible-version') + ceph_installer.run( + args=[ + 'rm', + '-rf', + run.Raw('~/ceph-ansible'), + ], + check_status=False + ) + ceph_installer.run(args=[ + 'mkdir', + run.Raw('~/ceph-ansible'), + run.Raw(';'), + 'git', + 'clone', + run.Raw('-b %s' % branch), + run.Raw(ansible_repo), + ]) + self._copy_and_print_config() + str_args = ' '.join(args) + ceph_installer.run(args=[ + run.Raw('cd ~/ceph-ansible'), + run.Raw(';'), + 'virtualenv', + run.Raw('--python=python3'), + 'venv', + run.Raw(';'), + run.Raw('source venv/bin/activate'), + run.Raw(';'), + 'pip', + 'install', + '--upgrade', + 'pip', + run.Raw(';'), + 'pip', + 'install', + '--upgrade', + 'cryptography>=2.5', + run.Raw(';'), + 'pip', + 'install', + run.Raw('setuptools>=11.3'), + run.Raw('notario>=0.0.13'), # FIXME: use requirements.txt + run.Raw('netaddr'), + run.Raw('six'), + run.Raw(';'), + 'LANG=en_US.utf8', + 'pip', + 'install', + run.Raw(ansible_ver), + run.Raw(';'), + run.Raw(str_args) + ]) + wait_for_health = self.config.get('wait-for-health', True) + if wait_for_health: + self.wait_for_ceph_health() + # for the teuthology workunits to work we + # need to fix the permission on keyring to be readable by them + self._create_rbd_pool() + self.fix_keyring_permission() + + def _copy_and_print_config(self): + ceph_installer = self.ceph_installer + # copy the inventory file to installer node + ceph_installer.put_file(self.inventory, 'ceph-ansible/inven.yml') + # copy the config provided site file or use sample + if self.playbook_file is not None: + ceph_installer.put_file(self.playbook_file, 'ceph-ansible/site.yml') + else: + # use the site.yml.sample provided by the repo as the main site.yml file + ceph_installer.run( + args=[ + 'cp', + 'ceph-ansible/site.yml.sample', + 'ceph-ansible/site.yml' + ] + ) + # copy extra vars to groups/all + ceph_installer.put_file(self.extra_vars_file, 'ceph-ansible/group_vars/all') + # print for debug info + ceph_installer.run(args=['cat', 'ceph-ansible/inven.yml']) + ceph_installer.run(args=['cat', 'ceph-ansible/site.yml']) + ceph_installer.run(args=['cat', 'ceph-ansible/group_vars/all']) + + def _create_rbd_pool(self): + mon_node = self.ceph_first_mon + log.info('Creating RBD pool') + mon_node.run( + args=[ + 'sudo', 'ceph', '--cluster', self.cluster_name, + 'osd', 'pool', 'create', 'rbd', '128', '128'], + check_status=False) + 
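+        # Tag the new pool with the 'rbd' application; check_status=False so a
+        # failure here (pool already tagged, or a release that predates
+        # 'osd pool application enable') does not abort the task.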
mon_node.run( + args=[ + 'sudo', 'ceph', '--cluster', self.cluster_name, + 'osd', 'pool', 'application', 'enable', + 'rbd', 'rbd', '--yes-i-really-mean-it' + ], + check_status=False) + + def fix_keyring_permission(self): + clients_only = lambda role: role.startswith('client') + for client in self.cluster.only(clients_only).remotes.keys(): + client.run(args=[ + 'sudo', + 'chmod', + run.Raw('o+r'), + '/etc/ceph/%s.client.admin.keyring' % self.cluster_name + ]) + + +class CephAnsibleError(Exception): + pass + +task = CephAnsible diff --git a/teuthology/task/cephmetrics.py b/teuthology/task/cephmetrics.py new file mode 100644 index 0000000000..0de36e303d --- /dev/null +++ b/teuthology/task/cephmetrics.py @@ -0,0 +1,95 @@ +import logging +import os +import pexpect +import time + +from teuthology.config import config as teuth_config +from teuthology.exceptions import CommandFailedError + +from teuthology.ansible import Ansible, LoggerFile + +log = logging.getLogger(__name__) + + +class CephMetrics(Ansible): + def __init__(self, ctx, config): + super(CephMetrics, self).__init__(ctx, config) + if 'repo' not in self.config: + self.config['repo'] = os.path.join( + teuth_config.ceph_git_base_url, 'cephmetrics.git') + if 'playbook' not in self.config: + self.config['playbook'] = './ansible/playbook.yml' + + def get_inventory(self): + return False + + def generate_inventory(self): + groups_to_roles = { + 'mons': 'mon', + 'mgrs': 'mgr', + 'mdss': 'mds', + 'osds': 'osd', + 'rgws': 'rgw', + 'clients': 'client', + 'ceph-grafana': 'cephmetrics', + } + hosts_dict = dict() + for group in sorted(groups_to_roles.keys()): + role_prefix = groups_to_roles[group] + want = lambda role: role.startswith(role_prefix) + if group not in hosts_dict: + hosts_dict[group] = dict(hosts=dict()) + group_dict = hosts_dict[group]['hosts'] + for (remote, roles) in self.cluster.only(want).remotes.items(): + hostname = remote.hostname + group_dict[hostname] = dict( + ansible_user=remote.user, + ) + hosts_dict[group]['hosts'] = group_dict + # It might be preferable to use a YAML inventory file, but + # that won't work until an ansible release is out with: + # https://github.com/ansible/ansible/pull/30730 + # Once that is done, we can simply do this: + # hosts_str = yaml.safe_dump(hosts_dict, default_flow_style=False) + # And then pass suffix='.yml' to _write_hosts_file(). 
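+        # Until then, assemble a plain INI inventory by hand. For a small job
+        # the generated file looks roughly like this (values illustrative):
+        #
+        #   [mons]
+        #   host0.example.com ansible_user=ubuntu
+        #
+        #   [ceph-grafana]
+        #   host1.example.com ansible_user=ubuntu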
+ hosts_lines = [] + for group in hosts_dict.keys(): + hosts_lines.append('[%s]' % group) + for host, vars_ in hosts_dict[group]['hosts'].items(): + host_line = ' '.join( + [host] + map( + lambda tuple_: '='.join(tuple_), + vars_.items(), + ) + ) + hosts_lines.append(host_line) + hosts_lines.append('') + hosts_str = '\n'.join(hosts_lines) + self.inventory = self._write_inventory_files(hosts_str) + self.generated_inventory = True + + def begin(self): + super(CephMetrics, self).begin() + wait_time = 5 * 60 + self.log.info( + "Waiting %ss for data collection before running tests...", + wait_time, + ) + time.sleep(wait_time) + self.run_tests() + + def run_tests(self): + self.log.info("Running tests...") + command = "tox -e integration %s" % self.inventory + out, status = pexpect.run( + command, + cwd=self.repo_path, + logfile=LoggerFile(self.log.getChild('tests'), logging.INFO), + withexitstatus=True, + timeout=None, + ) + if status != 0: + raise CommandFailedError(command, status) + + +task = CephMetrics diff --git a/teuthology/task/clock.py b/teuthology/task/clock.py new file mode 100644 index 0000000000..982eb8e1bd --- /dev/null +++ b/teuthology/task/clock.py @@ -0,0 +1,122 @@ +""" +Clock synchronizer +""" +import logging +import contextlib + +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def filter_out_containers(cluster): + """ + Returns a cluster that excludes remotes which should skip this task. + Currently, only skips containerized remotes. + """ + return cluster.filter(lambda r: not r.is_container) + +@contextlib.contextmanager +def task(ctx, config): + """ + Sync or skew clock + + This will initially sync the clocks. Eventually it should let us also + skew by some number of seconds. + + example:: + + tasks: + - clock: + - ceph: + - interactive: + + to sync. + + :param ctx: Context + :param config: Configuration + """ + + log.info('Syncing clocks and checking initial clock skew...') + cluster = filter_out_containers(ctx.cluster) + run.wait( + cluster.run( + args = [ + 'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'), + 'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'), + 'sudo', 'systemctl', 'stop', 'chronyd.service', + run.Raw(';'), + 'sudo', 'ntpd', '-gq', run.Raw('||'), + 'sudo', 'chronyc', 'makestep', + run.Raw(';'), + 'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'), + 'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'), + 'sudo', 'systemctl', 'start', 'chronyd.service', + run.Raw(';'), + 'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'), + 'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources', + run.Raw('||'), + 'true' + ], + timeout = 360, + wait=False, + ) + ) + + try: + yield + + finally: + log.info('Checking final clock skew...') + cluster = filter_out_containers(ctx.cluster) + run.wait( + cluster.run( + args=[ + 'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'), + 'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources', + run.Raw('||'), + 'true' + ], + wait=False, + ) + ) + + +@contextlib.contextmanager +def check(ctx, config): + """ + Run ntpq at the start and the end of the task. 
+ + :param ctx: Context + :param config: Configuration + """ + log.info('Checking initial clock skew...') + cluster = filter_out_containers(ctx.cluster) + run.wait( + cluster.run( + args=[ + 'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'), + 'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources', + run.Raw('||'), + 'true' + ], + wait=False, + ) + ) + + try: + yield + + finally: + log.info('Checking final clock skew...') + cluster = filter_out_containers(ctx.cluster) + run.wait( + cluster.run( + args=[ + 'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'), + 'PATH=/usr/bin:/usr/sbin', 'chronyc', 'sources', + run.Raw('||'), + 'true' + ], + wait=False, + ) + ) diff --git a/teuthology/task/common_fs_utils.py b/teuthology/task/common_fs_utils.py new file mode 100644 index 0000000000..584897968a --- /dev/null +++ b/teuthology/task/common_fs_utils.py @@ -0,0 +1,123 @@ +""" +Common filesystem related utilities. Originally this +code was part of rbd.py. It was broken out so that it +could be used by other modules (tgt.py and iscsi.py for instance). +""" +import logging +import contextlib +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +def default_image_name(role): + """ + Image name used by rbd and iscsi + """ + return 'testimage.{role}'.format(role=role) + + +@contextlib.contextmanager +def generic_mkfs(ctx, config, devname_rtn): + """ + Create a filesystem (either rbd or tgt, depending on devname_rtn) + + Rbd for example, now makes the following calls: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + - rbd.dev_create: [client.0] + - common_fs_utils.generic_mkfs: [client.0] + - common_fs_utils.generic_mount: + client.0: testimage.client.0 + """ + assert isinstance(config, list) or isinstance(config, dict), \ + "task mkfs must be configured with a list or dictionary" + if isinstance(config, dict): + images = config.items() + else: + images = [(role, None) for role in config] + + for role, properties in images: + if properties is None: + properties = {} + (remote,) = ctx.cluster.only(role).remotes.keys() + image = properties.get('image_name', default_image_name(role)) + fs_type = properties.get('fs_type', 'ext3') + remote.run( + args=[ + 'sudo', + 'mkfs', + '-t', fs_type, + devname_rtn(ctx, image), + ], + ) + yield + + +@contextlib.contextmanager +def generic_mount(ctx, config, devname_rtn): + """ + Generic Mount an rbd or tgt image. + + Rbd for example, now makes the following calls: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + - rbd.dev_create: [client.0] + - common_fs_utils.generic_mkfs: [client.0] + - common_fs_utils.generic_mount: + client.0: testimage.client.0 + """ + assert isinstance(config, list) or isinstance(config, dict), \ + "task mount must be configured with a list or dictionary" + if isinstance(config, dict): + role_images = config.items() + else: + role_images = [(role, None) for role in config] + + testdir = teuthology.get_testdir(ctx) + + mnt_template = '{tdir}/mnt.{id}' + mounted = [] + for role, image in role_images: + if image is None: + image = default_image_name(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + _, _, id_ = teuthology.split_role(role) + mnt = mnt_template.format(tdir=testdir, id=id_) + mounted.append((remote, mnt)) + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ] + ) + + remote.run( + args=[ + 'sudo', + 'mount', + devname_rtn(ctx, image), + mnt, + ], + ) + + try: + yield + finally: + log.info("Unmounting rbd images... 
%s", mounted) + for remote, mnt in mounted: + remote.run( + args=[ + 'sudo', + 'umount', + mnt, + ], + ) + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ] + ) diff --git a/teuthology/task/console_log.py b/teuthology/task/console_log.py new file mode 100644 index 0000000000..01b89351f5 --- /dev/null +++ b/teuthology/task/console_log.py @@ -0,0 +1,112 @@ +import logging +import os + +from teuthology.orchestra.cluster import Cluster +from teuthology.exit import exiter +from teuthology.task import Task + +log = logging.getLogger(__name__) + + +class ConsoleLog(Task): + enabled = True + name = 'console_log' + logfile_name = '{shortname}.log' + + def __init__(self, ctx=None, config=None): + super(ConsoleLog, self).__init__(ctx, config) + if self.config.get('enabled') is False: + self.enabled = False + if not getattr(self.ctx, 'archive', None): + self.enabled = False + if 'logfile_name' in self.config: + self.logfile_name = self.config['logfile_name'] + if 'remotes' in self.config: + self.remotes = self.config['remotes'] + + def filter_hosts(self): + super(ConsoleLog, self).filter_hosts() + if not hasattr(self.ctx, 'cluster'): + return + new_cluster = Cluster() + for (remote, roles) in self.cluster.remotes.items(): + if not hasattr(remote.console, 'spawn_sol_log'): + log.debug("%s does not support IPMI; excluding", + remote.shortname) + elif not (remote.console.has_ipmi_credentials or + remote.console.has_conserver): + log.debug("Cannot find IPMI credentials or conserver settings " + "for %s; excluding", + remote.shortname) + else: + new_cluster.add(remote, roles) + self.cluster = new_cluster + self.remotes = self.cluster.remotes.keys() + return self.cluster + + def setup(self): + if not self.enabled: + return + super(ConsoleLog, self).setup() + self.processes = dict() + self.signal_handlers = list() + self.setup_archive() + + def setup_archive(self): + self.archive_dir = os.path.join( + self.ctx.archive, + 'console_logs', + ) + if not os.path.isdir(self.archive_dir): + os.makedirs(self.archive_dir) + + def begin(self): + if not self.enabled: + return + super(ConsoleLog, self).begin() + self.start_logging() + + def start_logging(self): + for remote in self.remotes: + log_path = os.path.join( + self.archive_dir, + self.logfile_name.format(shortname=remote.shortname), + ) + proc = remote.console.spawn_sol_log(log_path) + self.processes[remote.shortname] = proc + + # Install a signal handler to make sure the console-logging + # processes are terminated if the job is killed + def kill_console_loggers(signal_, frame): + for (name, proc) in self.processes.items(): + log.debug("Killing console logger for %s", name) + proc.terminate() + exiter.add_handler(15, kill_console_loggers) + + def end(self): + if not self.enabled: + return + super(ConsoleLog, self).end() + self.stop_logging() + + def stop_logging(self, force=False): + for proc in self.processes.values(): + if proc.poll() is not None: + continue + if force: + proc.kill() + else: + proc.terminate() + + # Remove any signal handlers + for handler in self.signal_handlers: + handler.remove() + + def teardown(self): + if not self.enabled: + return + self.stop_logging(force=True) + super(ConsoleLog, self).teardown() + + +task = ConsoleLog diff --git a/teuthology/task/dump_ctx.py b/teuthology/task/dump_ctx.py new file mode 100644 index 0000000000..f2da22e121 --- /dev/null +++ b/teuthology/task/dump_ctx.py @@ -0,0 +1,19 @@ +import logging +import pprint + +log = logging.getLogger(__name__) +pp = pprint.PrettyPrinter(indent=4) + +def _pprint_me(thing, 
prefix): + return prefix + "\n" + pp.pformat(thing) + +def task(ctx, config): + """ + Dump task context and config in teuthology log/output + + The intended use case is didactic - to provide an easy way for newbies, who + are working on teuthology tasks for the first time, to find out what + is inside the ctx and config variables that are passed to each task. + """ + log.info(_pprint_me(ctx, "Task context:")) + log.info(_pprint_me(config, "Task config:")) diff --git a/teuthology/task/exec.py b/teuthology/task/exec.py new file mode 100644 index 0000000000..b3548c332d --- /dev/null +++ b/teuthology/task/exec.py @@ -0,0 +1,74 @@ +""" +Exececute custom commands +""" +import logging + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Execute commands on a given role + + tasks: + - ceph: + - kclient: [client.a] + - exec: + client.a: + - "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control" + - "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control" + - interactive: + + It stops and fails with the first command that does not return on success. It means + that if the first command fails, the second won't run at all. + + You can run a command on all hosts `all-hosts`, or all roles with `all-roles`: + + tasks: + - exec: + all-hosts: + - touch /etc/passwd + - exec: + all-roles: + - pwd + + To avoid confusion it is recommended to explicitly enclose the commands in + double quotes. For instance if the command is false (without double quotes) it will + be interpreted as a boolean by the YAML parser. + + :param ctx: Context + :param config: Configuration + """ + log.info('Executing custom commands...') + assert isinstance(config, dict), "task exec got invalid config" + + testdir = teuthology.get_testdir(ctx) + + if 'all' in config and len(config) == 1: + a = config['all'] + roles = teuthology.all_roles(ctx.cluster) + config = dict((id_, a) for id_ in roles) + elif 'all-roles' in config and len(config) == 1: + a = config['all-roles'] + roles = teuthology.all_roles(ctx.cluster) + config = dict((id_, a) for id_ in roles) + elif 'all-hosts' in config and len(config) == 1: + a = config['all-hosts'] + roles = [roles[0] for roles in ctx.cluster.remotes.values()] + config = dict((id_, a) for id_ in roles) + + for role, ls in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Running commands on role %s host %s', role, remote.name) + for c in ls: + c.replace('$TESTDIR', testdir) + remote.run( + args=[ + 'sudo', + 'TESTDIR={tdir}'.format(tdir=testdir), + 'bash', + '-c', + c], + ) + diff --git a/teuthology/task/full_sequential.py b/teuthology/task/full_sequential.py new file mode 100644 index 0000000000..a9990f2aa3 --- /dev/null +++ b/teuthology/task/full_sequential.py @@ -0,0 +1,39 @@ +""" +Task sequencer - full +""" +import sys +import logging + +from teuthology import run_tasks + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Run a set of tasks to completion in order. __exit__ is called on a task + before __enter__ on the next + + example:: + - full_sequential: + - tasktest: + - tasktest: + + :param ctx: Context + :param config: Configuration + """ + for entry in config: + if not isinstance(entry, dict): + entry = ctx.config.get(entry, {}) + ((taskname, confg),) = entry.items() + log.info('In full_sequential, running task %s...' 
% taskname) + mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg) + if hasattr(mgr, '__enter__'): + try: + mgr.__enter__() + finally: + try: + exc_info = sys.exc_info() + mgr.__exit__(*exc_info) + finally: + del exc_info diff --git a/teuthology/task/full_sequential_finally.py b/teuthology/task/full_sequential_finally.py new file mode 100644 index 0000000000..76e3bbbdeb --- /dev/null +++ b/teuthology/task/full_sequential_finally.py @@ -0,0 +1,54 @@ +""" +Task sequencer finally +""" +import sys +import logging +import contextlib + +from teuthology import run_tasks + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Sequentialize a group of tasks into one executable block, run on cleanup + + example:: + + tasks: + - foo: + - full_sequential_finally: + - final1: + - final2: + - bar: + - baz: + + The final1 and final2 tasks will run when full_sequentiall_finally is torn + down, after the nested bar and baz tasks have run to completion, and right + before the preceding foo task is torn down. This is useful if there are + additional steps you want to interject in a job during the shutdown (instead + of startup) phase. + + :param ctx: Context + :param config: Configuration + """ + try: + yield + finally: + for entry in config: + if not isinstance(entry, dict): + entry = ctx.config.get(entry, {}) + ((taskname, confg),) = entry.items() + log.info('In full_sequential_finally, running task %s...' % taskname) + mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg) + if hasattr(mgr, '__enter__'): + try: + mgr.__enter__() + finally: + try: + exc_info = sys.exc_info() + mgr.__exit__(*exc_info) + finally: + del exc_info diff --git a/teuthology/task/hadoop.py b/teuthology/task/hadoop.py new file mode 100644 index 0000000000..7754a76959 --- /dev/null +++ b/teuthology/task/hadoop.py @@ -0,0 +1,424 @@ +from io import StringIO +import contextlib +import logging +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run +from teuthology.exceptions import UnsupportedPackageTypeError + +log = logging.getLogger(__name__) + +HADOOP_2x_URL = "https://archive.apache.org/dist/hadoop/core/hadoop-2.5.2/hadoop-2.5.2.tar.gz" + +def dict_to_hadoop_conf(items): + out = "\n" + for key, value in items.items(): + out += " \n" + out += " " + key + "\n" + out += " " + value + "\n" + out += " \n" + out += "\n" + return out + +def is_hadoop_type(type_): + return lambda role: role.startswith('hadoop.' 
+ type_) + +def get_slaves_data(ctx): + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/slaves".format(tdir=tempdir) + nodes = ctx.cluster.only(is_hadoop_type('slave')) + hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes] + data = '\n'.join(hosts) + return path, data + +def get_masters_data(ctx): + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/masters".format(tdir=tempdir) + nodes = ctx.cluster.only(is_hadoop_type('master')) + hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes] + data = '\n'.join(hosts) + return path, data + +def get_core_site_data(ctx, config): + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/core-site.xml".format(tdir=tempdir) + nodes = ctx.cluster.only(is_hadoop_type('master')) + host = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes][0] + + conf = {} + if config.get('hdfs', False): + conf.update({ + 'fs.defaultFS': 'hdfs://{namenode}:9000', + 'hadoop.tmp.dir': '{tdir}/hadoop_tmp', + }) + else: + conf.update({ + 'fs.default.name': 'ceph://{namenode}:6789/', + 'fs.defaultFS': 'ceph://{namenode}:6789/', + 'ceph.conf.file': '/etc/ceph/ceph.conf', + 'ceph.mon.address': '{namenode}:6789', + 'ceph.auth.id': 'admin', + #'ceph.data.pools': 'cephfs_data', + 'fs.AbstractFileSystem.ceph.impl': 'org.apache.hadoop.fs.ceph.CephFs', + 'fs.ceph.impl': 'org.apache.hadoop.fs.ceph.CephFileSystem', + }) + + data_tmpl = dict_to_hadoop_conf(conf) + return path, data_tmpl.format(tdir=tempdir, namenode=host) + +def get_mapred_site_data(ctx): + data_tmpl = """ + + + mapred.job.tracker + {namenode}:9001 + + + mapreduce.framework.name + yarn + + +""" + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/mapred-site.xml".format(tdir=tempdir) + nodes = ctx.cluster.only(is_hadoop_type('master')) + hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes] + assert len(hosts) == 1 + host = hosts[0] + return path, data_tmpl.format(namenode=host) + +def get_yarn_site_data(ctx): + conf = {} + conf.update({ + 'yarn.resourcemanager.resourcetracker.address': '{namenode}:8025', + 'yarn.resourcemanager.scheduler.address': '{namenode}:8030', + 'yarn.resourcemanager.address': '{namenode}:8050', + 'yarn.resourcemanager.admin.address': '{namenode}:8041', + 'yarn.resourcemanager.hostname': '{namenode}', + 'yarn.nodemanager.aux-services': 'mapreduce_shuffle', + 'yarn.nodemanager.sleep-delay-before-sigkill.ms': '10000', + }) + data_tmpl = dict_to_hadoop_conf(conf) + + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/yarn-site.xml".format(tdir=tempdir) + nodes = ctx.cluster.only(is_hadoop_type('master')) + hosts = [s.ssh.get_transport().getpeername()[0] for s in nodes.remotes] + assert len(hosts) == 1 + host = hosts[0] + return path, data_tmpl.format(namenode=host) + +def get_hdfs_site_data(ctx): + data = """ + + + dfs.replication + 1 + + +""" + tempdir = teuthology.get_testdir(ctx) + path = "{tdir}/hadoop/etc/hadoop/hdfs-site.xml".format(tdir=tempdir) + return path, data + +def configure(ctx, config, hadoops): + tempdir = teuthology.get_testdir(ctx) + + log.info("Writing Hadoop slaves file...") + for remote in hadoops.remotes: + path, data = get_slaves_data(ctx) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Writing Hadoop masters file...") + for remote in hadoops.remotes: + path, data = get_masters_data(ctx) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Writing Hadoop core-site.xml 
file...") + for remote in hadoops.remotes: + path, data = get_core_site_data(ctx, config) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Writing Hadoop yarn-site.xml file...") + for remote in hadoops.remotes: + path, data = get_yarn_site_data(ctx) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Writing Hadoop hdfs-site.xml file...") + for remote in hadoops.remotes: + path, data = get_hdfs_site_data(ctx) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Writing Hadoop mapred-site.xml file...") + for remote in hadoops.remotes: + path, data = get_mapred_site_data(ctx) + teuthology.write_file(remote, path, StringIO(data)) + + log.info("Setting JAVA_HOME in hadoop-env.sh...") + for remote in hadoops.remotes: + path = "{tdir}/hadoop/etc/hadoop/hadoop-env.sh".format(tdir=tempdir) + if remote.os.package_type == 'rpm': + data = "JAVA_HOME=/usr/lib/jvm/java\n" + elif remote.os.package_type == 'deb': + data = "JAVA_HOME=/usr/lib/jvm/default-java\n" + else: + raise UnsupportedPackageTypeError(remote) + teuthology.prepend_lines_to_file(remote, path, data) + + if config.get('hdfs', False): + log.info("Formatting HDFS...") + testdir = teuthology.get_testdir(ctx) + hadoop_dir = "{tdir}/hadoop/".format(tdir=testdir) + masters = ctx.cluster.only(is_hadoop_type('master')) + assert len(masters.remotes) == 1 + master = next(iter(masters.remotes.keys())) + master.run( + args = [ + hadoop_dir + "bin/hadoop", + "namenode", + "-format" + ], + wait = True, + ) + +@contextlib.contextmanager +def install_hadoop(ctx, config): + testdir = teuthology.get_testdir(ctx) + + log.info("Downloading Hadoop...") + hadoop_tarball = "{tdir}/hadoop.tar.gz".format(tdir=testdir) + hadoops = ctx.cluster.only(is_hadoop_type('')) + run.wait( + hadoops.run( + args = [ + 'wget', + '-nv', + '-O', + hadoop_tarball, + HADOOP_2x_URL + ], + wait = False, + ) + ) + + log.info("Create directory for Hadoop install...") + hadoop_dir = "{tdir}/hadoop".format(tdir=testdir) + run.wait( + hadoops.run( + args = [ + 'mkdir', + hadoop_dir + ], + wait = False, + ) + ) + + log.info("Unpacking Hadoop...") + run.wait( + hadoops.run( + args = [ + 'tar', + 'xzf', + hadoop_tarball, + '--strip-components=1', + '-C', + hadoop_dir + ], + wait = False, + ) + ) + + log.info("Removing Hadoop download...") + run.wait( + hadoops.run( + args = [ + 'rm', + hadoop_tarball + ], + wait = False, + ) + ) + + log.info("Create Hadoop temporary directory...") + hadoop_tmp_dir = "{tdir}/hadoop_tmp".format(tdir=testdir) + run.wait( + hadoops.run( + args = [ + 'mkdir', + hadoop_tmp_dir + ], + wait = False, + ) + ) + + if not config.get('hdfs', False): + log.info("Fetching cephfs-hadoop...") + + sha1, url = teuthology.get_ceph_binary_url( + package = "hadoop", + format = "jar", + dist = "precise", + arch = "x86_64", + flavor = "default", + branch = "main") + + run.wait( + hadoops.run( + args = [ + 'wget', + '-nv', + '-O', + "{tdir}/cephfs-hadoop.jar".format(tdir=testdir), # FIXME + url + "/cephfs-hadoop-0.80.6.jar", # FIXME + ], + wait = False, + ) + ) + + run.wait( + hadoops.run( + args = [ + 'mv', + "{tdir}/cephfs-hadoop.jar".format(tdir=testdir), + "{tdir}/hadoop/share/hadoop/common/".format(tdir=testdir), + ], + wait = False, + ) + ) + + # Copy JNI native bits. Need to do this explicitly because the + # handling is dependent on the os-type. 
+ for remote in hadoops.remotes: + libcephfs_jni_path = None + if remote.os.package_type == 'rpm': + libcephfs_jni_path = "/usr/lib64/libcephfs_jni.so.1.0.0" + elif remote.os.package_type == 'deb': + libcephfs_jni_path = "/usr/lib/jni/libcephfs_jni.so" + else: + raise UnsupportedPackageTypeError(remote) + + libcephfs_jni_fname = "libcephfs_jni.so" + remote.run( + args = [ + 'cp', + libcephfs_jni_path, + "{tdir}/hadoop/lib/native/{fname}".format(tdir=testdir, + fname=libcephfs_jni_fname), + ]) + + run.wait( + hadoops.run( + args = [ + 'cp', + "/usr/share/java/libcephfs.jar", + "{tdir}/hadoop/share/hadoop/common/".format(tdir=testdir), + ], + wait = False, + ) + ) + + configure(ctx, config, hadoops) + + try: + yield + finally: + run.wait( + hadoops.run( + args = [ + 'rm', + '-rf', + hadoop_dir, + hadoop_tmp_dir + ], + wait = False, + ) + ) + +@contextlib.contextmanager +def start_hadoop(ctx, config): + testdir = teuthology.get_testdir(ctx) + hadoop_dir = "{tdir}/hadoop/".format(tdir=testdir) + masters = ctx.cluster.only(is_hadoop_type('master')) + assert len(masters.remotes) == 1 + master = next(iter(masters.remotes.keys())) + + log.info("Stopping Hadoop daemons") + master.run( + args = [ + hadoop_dir + "sbin/stop-yarn.sh" + ], + wait = True, + ) + + master.run( + args = [ + hadoop_dir + "sbin/stop-dfs.sh" + ], + wait = True, + ) + + if config.get('hdfs', False): + log.info("Starting HDFS...") + master.run( + args = [ + hadoop_dir + "sbin/start-dfs.sh" + ], + wait = True, + ) + + log.info("Starting YARN...") + master.run( + args = [ + hadoop_dir + "sbin/start-yarn.sh" + ], + wait = True, + ) + + try: + yield + + finally: + log.info("Stopping Hadoop daemons") + + master.run( + args = [ + hadoop_dir + "sbin/stop-yarn.sh" + ], + wait = True, + ) + + master.run( + args = [ + hadoop_dir + "sbin/stop-dfs.sh" + ], + wait = True, + ) + + run.wait( + ctx.cluster.run( + args = [ + 'sudo', + 'skill', + '-9', + 'java' + ], + wait = False + ) + ) + +@contextlib.contextmanager +def task(ctx, config): + if config is None: + config = {} + assert isinstance(config, dict), "task hadoop config must be dictionary" + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('hadoop', {})) + + tasks = [ + lambda: install_hadoop(ctx=ctx, config=config), + lambda: start_hadoop(ctx=ctx, config=config), + ] + + with contextutil.nested(*tasks): + yield diff --git a/teuthology/task/install/__init__.py b/teuthology/task/install/__init__.py new file mode 100644 index 0000000000..686ab5e053 --- /dev/null +++ b/teuthology/task/install/__init__.py @@ -0,0 +1,619 @@ +import contextlib +import copy +import logging +import os +import subprocess +import yaml + +from teuthology import misc as teuthology +from teuthology import contextutil, packaging +from teuthology.parallel import parallel +from teuthology.task import ansible + +from distutils.version import LooseVersion +from teuthology.task.install.util import ( + _get_builder_project, get_flavor, ship_utilities, +) + +from teuthology.task.install import rpm, deb, redhat + +log = logging.getLogger(__name__) + +def get_upgrade_version(ctx, config, remote): + builder = _get_builder_project(ctx, remote, config) + version = builder.version + return version + +def verify_package_version(ctx, config, remote): + """ + Ensures that the version of package installed is what + was asked for in the config. + + For most cases this is for ceph, but we also install samba + for example. 
+ """ + # Do not verify the version if the ceph-deploy task is being used to + # install ceph. Verifying the ceph installed by ceph-deploy should work, + # but the qa suites will need reorganized first to run ceph-deploy + # before the install task. + # see: http://tracker.ceph.com/issues/11248 + if config.get("extras"): + log.info("Skipping version verification...") + return True + if 'repos' in config and config.get('repos'): + log.info("Skipping version verification because we have custom repos...") + return True + builder = _get_builder_project(ctx, remote, config) + version = builder.version + pkg_to_check = builder.project + installed_ver = packaging.get_package_version(remote, pkg_to_check) + if installed_ver and version in installed_ver: + msg = "The correct {pkg} version {ver} is installed.".format( + ver=version, + pkg=pkg_to_check + ) + log.info(msg) + else: + raise RuntimeError( + "{pkg} version {ver} was not installed, found {installed}.".format( + ver=version, + installed=installed_ver, + pkg=pkg_to_check + ) + ) + + +def install_packages(ctx, pkgs, config): + """ + Installs packages on each remote in ctx. + + :param ctx: the argparse.Namespace object + :param pkgs: list of packages names to install + :param config: the config dict + """ + install_pkgs = { + "deb": deb._update_package_list_and_install, + "rpm": rpm._update_package_list_and_install, + } + with parallel() as p: + for remote in ctx.cluster.remotes.keys(): + system_type = teuthology.get_system_type(remote) + p.spawn( + install_pkgs[system_type], + ctx, remote, pkgs[system_type], config) + + for remote in ctx.cluster.remotes.keys(): + # verifies that the install worked as expected + verify_package_version(ctx, config, remote) + + +def remove_packages(ctx, config, pkgs): + """ + Removes packages from each remote in ctx. + + :param ctx: the argparse.Namespace object + :param config: the config dict + :param pkgs: list of packages names to remove + """ + remove_pkgs = { + "deb": deb._remove, + "rpm": rpm._remove, + } + cleanup = config.get('cleanup', False) + with parallel() as p: + for remote in ctx.cluster.remotes.keys(): + if not remote.is_reimageable or cleanup: + system_type = teuthology.get_system_type(remote) + p.spawn(remove_pkgs[ + system_type], ctx, config, remote, pkgs[system_type]) + + +def remove_sources(ctx, config): + """ + Removes repo source files from each remote in ctx. 
+ + :param ctx: the argparse.Namespace object + :param config: the config dict + """ + remove_sources_pkgs = { + 'deb': deb._remove_sources_list, + 'rpm': rpm._remove_sources_list, + } + cleanup = config.get('cleanup', False) + project = config.get('project', 'ceph') + with parallel() as p: + for remote in ctx.cluster.remotes.keys(): + if not remote.is_reimageable or cleanup: + log.info("Removing {p} sources lists on {r}" + .format(p=project,r=remote)) + remove_fn = remove_sources_pkgs[remote.os.package_type] + p.spawn(remove_fn, ctx, config, remote) + + +def get_package_list(ctx, config): + debug = config.get('debuginfo', False) + project = config.get('project', 'ceph') + yaml_path = None + # Look for /packages/packages.yaml + if hasattr(ctx, 'config') and 'suite_path' in ctx.config: + suite_packages_path = os.path.join( + ctx.config['suite_path'], + 'packages', + 'packages.yaml', + ) + if os.path.exists(suite_packages_path): + yaml_path = suite_packages_path + # If packages.yaml isn't found in the suite_path, potentially use + # teuthology's + yaml_path = yaml_path or os.path.join( + os.path.dirname(__file__), + 'packages.yaml', + ) + default_packages = yaml.safe_load(open(yaml_path)) + default_debs = default_packages.get(project, dict()).get('deb', []) + default_rpms = default_packages.get(project, dict()).get('rpm', []) + # If a custom deb and/or rpm list is provided via the task config, use + # that. Otherwise, use the list from whichever packages.yaml was found + # first + debs = config.get('packages', dict()).get('deb', default_debs) + rpms = config.get('packages', dict()).get('rpm', default_rpms) + # Optionally include or exclude debug packages + if not debug: + debs = [p for p in debs if not p.endswith('-dbg')] + rpms = [p for p in rpms if not p.endswith('-debuginfo')] + + def exclude(pkgs, exclude_list): + return list(pkg for pkg in pkgs if pkg not in exclude_list) + + excluded_packages = config.get('exclude_packages', []) + if isinstance(excluded_packages, dict): + log.debug("Excluding packages: {}".format(excluded_packages)) + debs = exclude(debs, excluded_packages.get('deb', [])) + rpms = exclude(rpms, excluded_packages.get('rpm', [])) + else: + debs = exclude(debs, excluded_packages) + rpms = exclude(rpms, excluded_packages) + + package_list = dict(deb=debs, rpm=rpms) + log.debug("Package list is: {}".format(package_list)) + return package_list + + +@contextlib.contextmanager +def install(ctx, config): + """ + The install task. Installs packages for a given project on all hosts in + ctx. May work for projects besides ceph, but may not. Patches welcomed! + + :param ctx: the argparse.Namespace object + :param config: the config dict + """ + + package_list = get_package_list(ctx, config) + debs = package_list['deb'] + rpms = package_list['rpm'] + + # pull any additional packages out of config + extra_pkgs = config.get('extra_packages', []) + log.info('extra packages: {packages}'.format(packages=extra_pkgs)) + if isinstance(extra_pkgs, dict): + debs += extra_pkgs.get('deb', []) + rpms += extra_pkgs.get('rpm', []) + else: + debs += extra_pkgs + rpms += extra_pkgs + + # When extras is in the config we want to purposely not install ceph. + # This is typically used on jobs that use ceph-deploy to install ceph + # or when we are testing ceph-deploy directly. The packages being + # installed are needed to properly test ceph as ceph-deploy won't + # install these. 'extras' might not be the best name for this. 
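+    # When 'extras' is set, replace the full package list with the small set of
+    # client and test packages that ceph-deploy does not install itself.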
+ extras = config.get('extras') + if extras is not None: + debs = ['ceph-test', 'ceph-fuse', + 'librados2', 'librbd1', + 'python-ceph'] + rpms = ['ceph-fuse', 'librbd1', 'librados2', 'ceph-test', 'python-ceph'] + package_list = dict(deb=debs, rpm=rpms) + install_packages(ctx, package_list, config) + try: + yield + finally: + remove_packages(ctx, config, package_list) + remove_sources(ctx, config) + + +def upgrade_old_style(ctx, node, remote, pkgs, system_type): + """ + Handle the upgrade using methods in use prior to ceph-deploy. + """ + if system_type == 'deb': + deb._upgrade_packages(ctx, node, remote, pkgs) + elif system_type == 'rpm': + rpm._upgrade_packages(ctx, node, remote, pkgs) + + +def upgrade_with_ceph_deploy(ctx, node, remote, pkgs, sys_type): + """ + Upgrade using ceph-deploy + """ + dev_table = ['branch', 'tag', 'dev'] + ceph_dev_parm = '' + ceph_rel_parm = '' + for entry in node.keys(): + if entry in dev_table: + ceph_dev_parm = node[entry] + if entry == 'release': + ceph_rel_parm = node[entry] + params = [] + if ceph_dev_parm: + params += ['--dev', ceph_dev_parm] + if ceph_rel_parm: + params += ['--release', ceph_rel_parm] + params.append(remote.name) + subprocess.call(['ceph-deploy', 'install'] + params) + remote.run(args=['sudo', 'restart', 'ceph-all']) + + +def upgrade_remote_to_config(ctx, config): + assert config is None or isinstance(config, dict), \ + "install.upgrade only supports a dictionary for configuration" + + project = config.get('project', 'ceph') + + # use 'install' overrides here, in case the upgrade target is left + # unspecified/implicit. + install_overrides = ctx.config.get( + 'overrides', {}).get('install', {}).get(project, {}) + log.info('project %s config %s overrides %s', project, config, + install_overrides) + + # build a normalized remote -> config dict + remotes = {} + if 'all' in config: + for remote in ctx.cluster.remotes.keys(): + remotes[remote] = config.get('all') + else: + for role in config.keys(): + remotes_dict = ctx.cluster.only(role).remotes + if not remotes_dict: + # This is a regular config argument, not a role + continue + # take any remote in the dict + remote = next(iter(remotes_dict)) + if remote in remotes: + log.warning('remote %s came up twice (role %s)', remote, role) + continue + remotes[remote] = config.get(role) + + result = {} + for remote, node in remotes.items(): + if not node: + node = {} + + this_overrides = copy.deepcopy(install_overrides) + if 'sha1' in node or 'tag' in node or 'branch' in node: + log.info("config contains sha1|tag|branch, " + "removing those keys from override") + this_overrides.pop('sha1', None) + this_overrides.pop('tag', None) + this_overrides.pop('branch', None) + teuthology.deep_merge(node, this_overrides) + log.info('remote %s config %s', remote, node) + node['project'] = project + + result[remote] = node + + return result + +def _upgrade_is_downgrade(installed_version, upgrade_version): + assert installed_version, "installed_version is empty" + assert upgrade_version, "upgrade_version is empty" + return LooseVersion(installed_version) > LooseVersion(upgrade_version) + +def upgrade_common(ctx, config, deploy_style): + """ + Common code for upgrading + """ + remotes = upgrade_remote_to_config(ctx, config) + project = config.get('project', 'ceph') + + extra_pkgs = config.get('extra_packages', []) + log.info('extra packages: {packages}'.format(packages=extra_pkgs)) + + for remote, node in remotes.items(): + + system_type = teuthology.get_system_type(remote) + assert system_type in ('deb', 'rpm') 
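+        # Build the package list for this remote, then compare the installed
+        # ceph-common version against the upgrade target; LooseVersion('10.2.2')
+        # is greater than LooseVersion('9.2.1'), so an attempted downgrade
+        # raises RuntimeError below.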
+ pkgs = get_package_list(ctx, config)[system_type] + log.info("Upgrading {proj} {system_type} packages: {pkgs}".format( + proj=project, system_type=system_type, pkgs=', '.join(pkgs))) + if isinstance(extra_pkgs, dict): + pkgs += extra_pkgs.get(system_type, []) + else: + pkgs += extra_pkgs + + installed_version = packaging.get_package_version(remote, 'ceph-common') + upgrade_version = get_upgrade_version(ctx, node, remote) + log.info("Ceph {s} upgrade from {i} to {u}".format( + s=system_type, + i=installed_version, + u=upgrade_version + )) + if _upgrade_is_downgrade(installed_version, upgrade_version): + raise RuntimeError( + "An attempt to upgrade from a higher version to a lower one " + "will always fail. Hint: check tags in the target git branch." + ) + + + deploy_style(ctx, node, remote, pkgs, system_type) + verify_package_version(ctx, node, remote) + return len(remotes) + +docstring_for_upgrade = """" + Upgrades packages for a given project. + + For example:: + + tasks: + - install.{cmd_parameter}: + all: + branch: end + + or specify specific roles:: + + tasks: + - install.{cmd_parameter}: + mon.a: + branch: end + osd.0: + branch: other + + or rely on the overrides for the target version:: + + overrides: + install: + ceph: + sha1: ... + tasks: + - install.{cmd_parameter}: + all: + + (HACK: the overrides will *only* apply the sha1/branch/tag if those + keys are not present in the config.) + + It is also possible to attempt to exclude packages from the upgrade set: + + tasks: + - install.{cmd_parameter}: + exclude_packages: ['ceph-test', 'ceph-test-dbg'] + + :param ctx: the argparse.Namespace object + :param config: the config dict + """ + +# +# __doc__ strings for upgrade and ceph_deploy_upgrade are set from +# the same string so that help(upgrade) and help(ceph_deploy_upgrade) +# look the same. +# + + +@contextlib.contextmanager +def upgrade(ctx, config): + upgrade_common(ctx, config, upgrade_old_style) + yield + +upgrade.__doc__ = docstring_for_upgrade.format(cmd_parameter='upgrade') + + +@contextlib.contextmanager +def ceph_deploy_upgrade(ctx, config): + upgrade_common(ctx, config, upgrade_with_ceph_deploy) + yield + +ceph_deploy_upgrade.__doc__ = docstring_for_upgrade.format( + cmd_parameter='ceph_deploy_upgrade') + + +@contextlib.contextmanager +def task(ctx, config): + """ + Install packages for a given project. + + tasks: + - install: + project: ceph + branch: bar + - install: + project: samba + branch: foo + extra_packages: ['samba'] + - install: + extra_packages: + deb: ['librados-dev', 'libradosstriper-dev'] + rpm: ['librados-devel', 'libradosstriper-devel'] + extra_system_packages: + deb: ['libboost-system-dev'] + rpm: ['boost-devel'] + - install: + rhbuild: 1.3.0 + playbook: downstream_setup.yml + vars: + yum_repos: + - url: "http://location.repo" + name: "ceph_repo" + + Add repos before trying to install any package (all Shaman-related tasks + will be ignored): + + - install: + repos: + - name: "repo-alias" + priority: 1 + url: "http://location.repo" + + Note: The 'repos' are supported for SUSE-based distros only, but patches + are welcome to add support for other distros. + + + Overrides are project specific: + + overrides: + install: + ceph: + sha1: ... 
+ + + Debug packages may optionally be installed: + + overrides: + install: + ceph: + debuginfo: true + + + Default package lists (which come from packages.yaml) may be overridden: + + overrides: + install: + ceph: + packages: + deb: + - ceph-osd + - ceph-mon + rpm: + - ceph-devel + - rbd-fuse + + When tag, branch and sha1 do not reference the same commit hash, the + tag takes precedence over the branch and the branch takes precedence + over the sha1. + + When the overrides have a sha1 that is different from the sha1 of + the project to be installed, it will be a noop if the project has + a branch or tag, because they take precedence over the sha1. For + instance: + + overrides: + install: + ceph: + sha1: 1234 + + tasks: + - install: + project: ceph + sha1: 4567 + branch: foobar # which has sha1 4567 + + The override will transform the tasks as follows: + + tasks: + - install: + project: ceph + sha1: 1234 + branch: foobar # which has sha1 4567 + + But the branch takes precedence over the sha1 and foobar + will be installed. The override of the sha1 has no effect. + + When passed 'rhbuild' as a key, it will attempt to install an rh ceph build + using ceph-deploy + + Normally, the package management system will try to install or upgrade + specified packages as instructed. But if newer versions of these packages + to be installed have been installed on test node, we will have to uninstall + or downgrade them. To downgrade multiple packages in a single shot: + + tasks: + - install: + project: ceph + branch: hammer + downgrade_packages: ['librados2', 'librbd1'] + + Reminder regarding teuthology-suite side effects: + + The teuthology-suite command always adds the following: + + overrides: + install: + ceph: + sha1: 1234 + + where sha1 matches the --ceph argument. For instance if + teuthology-suite is called with --ceph main, the sha1 will be + the tip of main. 
If called with --ceph v0.94.1, the sha1 will be + the v0.94.1 (as returned by git rev-parse v0.94.1 which is not to + be confused with git rev-parse v0.94.1^{commit}) + + :param ctx: the argparse.Namespace object + :param config: the config dict + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + "task install only supports a dictionary for configuration" + + project, = config.get('project', 'ceph'), + log.debug('project %s' % project) + overrides = ctx.config.get('overrides') + repos = None + if overrides: + install_overrides = overrides.get('install', {}) + teuthology.deep_merge(config, install_overrides.get(project, {})) + repos = install_overrides.get('repos', None) + log.debug('INSTALL overrides: %s' % install_overrides) + log.debug('config %s' % config) + + rhbuild = None + if config.get('rhbuild'): + rhbuild = config.get('rhbuild') + log.info("Build is %s " % rhbuild) + + flavor = get_flavor(config) + log.info("Using flavor: %s", flavor) + + ctx.summary['flavor'] = flavor + nested_tasks = [lambda: redhat.install(ctx=ctx, config=config), + lambda: ship_utilities(ctx=ctx, config=None)] + + if config.get('rhbuild'): + if config.get('playbook'): + ansible_config = dict(config) + # remove key not required by ansible task + del ansible_config['rhbuild'] + nested_tasks.insert( + 0, + lambda: ansible.CephLab(ctx, config=ansible_config) + ) + with contextutil.nested(*nested_tasks): + yield + else: + nested_config = dict( + branch=config.get('branch'), + cleanup=config.get('cleanup'), + debuginfo=config.get('debuginfo'), + downgrade_packages=config.get('downgrade_packages', []), + exclude_packages=config.get('exclude_packages', []), + extra_packages=config.get('extra_packages', []), + extra_system_packages=config.get('extra_system_packages', []), + extras=config.get('extras', None), + flavor=flavor, + install_ceph_packages=config.get('install_ceph_packages', True), + packages=config.get('packages', dict()), + project=project, + repos_only=config.get('repos_only', False), + sha1=config.get('sha1'), + tag=config.get('tag'), + wait_for_package=config.get('wait_for_package', False), + ) + if repos: + nested_config['repos'] = repos + if 'shaman' in config: + nested_config['shaman'] = config['shaman'] + with contextutil.nested( + lambda: install(ctx=ctx, config=nested_config), + lambda: ship_utilities(ctx=ctx, config=None), + ): + yield diff --git a/teuthology/task/install/adjust-ulimits b/teuthology/task/install/adjust-ulimits new file mode 100755 index 0000000000..6f05392b90 --- /dev/null +++ b/teuthology/task/install/adjust-ulimits @@ -0,0 +1,16 @@ +#!/bin/sh +# If we're running as root, allow large amounts of open files. +USER=$(whoami) + +# If a ulimit call fails, exit immediately. +set -e + +if [ "$USER" = "root" ] +then + # Enable large number of open files + ulimit -n 65536 +fi + +# Enable core dumps for everything +ulimit -c unlimited +exec "$@" diff --git a/teuthology/task/install/daemon-helper b/teuthology/task/install/daemon-helper new file mode 100755 index 0000000000..3638a6d732 --- /dev/null +++ b/teuthology/task/install/daemon-helper @@ -0,0 +1,114 @@ +#!/usr/bin/python3 + +""" +Helper script for running long-living processes. + +(Name says daemon, but that is intended to mean "long-living", we +assume child process does not double-fork.) + +We start the command passed as arguments, with /dev/null as stdin, and +then wait for EOF on stdin. + +When EOF is seen on stdin, the child process is killed. + +When the child process exits, this helper exits too. 
+ +Usage: + daemon-helper [--kill-group] [nostdin] COMMAND ... +""" + +from __future__ import print_function + +import fcntl +import os +import select +import signal +import struct +import subprocess +import sys +from argparse import ArgumentParser + +parser = ArgumentParser(epilog= + 'The remaining parameters are the command to be run. If these\n' + + 'parameters start wih nostdin, then no stdin input is expected.') +parser.add_argument('signal') +parser.add_argument('--kill-group', action='store_true', + help='kill all processes in the group') +parser.add_argument('--nostdin', action='store_true', + help='no stdin input expected') +parsed, args = parser.parse_known_args() +end_signal = signal.SIGKILL +if parsed.signal == 'term': + end_signal = signal.SIGTERM +group = parsed.kill_group +nostdin = parsed.nostdin +skip_nostdin = 0 +try: + if args[0] == 'nostdin': + nostdin = True + skip_nostdin = 1 +except IndexError: + print('No command specified') + sys.exit(1) + + +proc = None +if nostdin: + if len(args) - skip_nostdin == 0: + print('No command specified') + sys.exit(1) + proc = subprocess.Popen( + args=args[skip_nostdin:], + ) +else: + with open('/dev/null', 'rb') as devnull: + proc = subprocess.Popen( + args=args, + stdin=devnull, + preexec_fn=os.setsid, + ) + +flags = fcntl.fcntl(0, fcntl.F_GETFL) +fcntl.fcntl(0, fcntl.F_SETFL, flags | os.O_NDELAY) + +saw_eof = False +while True: + r,w,x = select.select([0], [], [0], 0.2) + if r: + data = os.read(0, 1) + if not data: + saw_eof = True + if not group: + proc.send_signal(end_signal) + else: + os.killpg(proc.pid, end_signal) + break + else: + sig, = struct.unpack('!b', data) + if not group: + proc.send_signal(sig) + else: + os.killpg(proc.pid, end_signal) + + + if proc.poll() is not None: + # child exited + break + +exitstatus = proc.wait() +if exitstatus > 0: + print('{me}: command failed with exit status {exitstatus:d}'.format( + me=os.path.basename(sys.argv[0]), + exitstatus=exitstatus, + ), file=sys.stderr) + sys.exit(exitstatus) +elif exitstatus < 0: + if saw_eof and exitstatus == -end_signal: + # suppress error from the exit we intentionally caused + pass + else: + print('{me}: command crashed with signal {signal:d}'.format( + me=os.path.basename(sys.argv[0]), + signal=-exitstatus, + ), file=sys.stderr) + sys.exit(1) diff --git a/teuthology/task/install/deb.py b/teuthology/task/install/deb.py new file mode 100644 index 0000000000..92ad7b2d62 --- /dev/null +++ b/teuthology/task/install/deb.py @@ -0,0 +1,226 @@ +import logging +import os + +from io import StringIO + +from teuthology.orchestra import run +from teuthology.contextutil import safe_while + +from teuthology.task.install.util import _get_builder_project, _get_local_dir + + +log = logging.getLogger(__name__) + +def _retry_if_eagain_in_output(remote, args): + # wait at most 5 minutes + with safe_while(sleep=10, tries=30) as proceed: + while proceed(): + stderr = StringIO() + try: + return remote.run(args=args, stderr=stderr) + except run.CommandFailedError: + if "could not get lock" in stderr.getvalue().lower(): + stdout = StringIO() + args = ['sudo', 'fuser', '-v', '/var/lib/dpkg/lock-frontend'] + remote.run(args=args, stdout=stdout) + log.info("The processes holding 'lock-frontend':\n{}".format(stdout.getvalue())) + continue + else: + raise + +def install_dep_packages(remote, args): + _retry_if_eagain_in_output(remote, args) + +def _update_package_list_and_install(ctx, remote, debs, config): + """ + Runs ``apt-get update`` first, then runs ``apt-get install``, installing + 
the requested packages on the remote system. + + TODO: split this into at least two functions. + + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param debs: list of packages names to install + :param config: the config dict + """ + + # check for ceph release key + r = remote.run( + args=[ + 'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph', + ], + stdout=StringIO(), + check_status=False, + ) + if r.stdout.getvalue().find('Ceph automated package') == -1: + # if it doesn't exist, add it + remote.run( + args=[ + 'wget', '-q', '-O-', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', # noqa + run.Raw('|'), + 'sudo', 'apt-key', 'add', '-', + ], + stdout=StringIO(), + ) + + builder = _get_builder_project(ctx, remote, config) + log.info("Installing packages: {pkglist} on remote deb {arch}".format( + pkglist=", ".join(debs), arch=builder.arch) + ) + system_pkglist = config.get('extra_system_packages') + if system_pkglist: + if isinstance(system_pkglist, dict): + system_pkglist = system_pkglist.get('deb') + log.info("Installing system (non-project) packages: {pkglist} on remote deb {arch}".format( + pkglist=", ".join(system_pkglist), arch=builder.arch) + ) + # get baseurl + log.info('Pulling from %s', builder.base_url) + + version = builder.version + log.info('Package version is %s', version) + + builder.install_repo() + + remote.run(args=['sudo', 'apt-get', 'update'], check_status=False) + install_cmd = [ + 'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', + '--force-yes', + '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( + 'Dpkg::Options::="--force-confold"'), + 'install', + ] + install_dep_packages(remote, + args=install_cmd + ['%s=%s' % (d, version) for d in debs], + ) + if system_pkglist: + install_dep_packages(remote, + args=install_cmd + system_pkglist, + ) + ldir = _get_local_dir(config, remote) + if ldir: + for fyle in os.listdir(ldir): + fname = "%s/%s" % (ldir, fyle) + remote.run(args=['sudo', 'dpkg', '-i', fname],) + + +def _remove(ctx, config, remote, debs): + """ + Removes Debian packages from remote, rudely + + TODO: be less rude (e.g. 
using --force-yes) + + :param ctx: the argparse.Namespace object + :param config: the config dict + :param remote: the teuthology.orchestra.remote.Remote object + :param debs: list of packages names to install + """ + log.info("Removing packages: {pkglist} on Debian system.".format( + pkglist=", ".join(debs))) + # first ask nicely + remote.run( + args=[ + 'for', 'd', 'in', + ] + debs + [ + run.Raw(';'), + 'do', + 'sudo', + 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes', + '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( + 'Dpkg::Options::="--force-confold"'), 'purge', + run.Raw('$d'), + run.Raw('||'), + 'true', + run.Raw(';'), + 'done', + ]) + # mop up anything that is broken + remote.run( + args=[ + 'dpkg', '-l', + run.Raw('|'), + # Any package that is unpacked or half-installed and also requires + # reinstallation + 'grep', '^.\(U\|H\)R', + run.Raw('|'), + 'awk', '{print $2}', + run.Raw('|'), + 'sudo', + 'xargs', '--no-run-if-empty', + 'dpkg', '-P', '--force-remove-reinstreq', + ]) + # then let apt clean up + remote.run( + args=[ + 'sudo', + 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes', + '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( + 'Dpkg::Options::="--force-confold"'), + 'autoremove', + ], + ) + + +def _remove_sources_list(ctx, config, remote): + builder = _get_builder_project(ctx, remote, config) + builder.remove_repo() + remote.run( + args=[ + 'sudo', 'apt-get', 'update', + ], + check_status=False, + ) + + +def _upgrade_packages(ctx, config, remote, debs): + """ + Upgrade project's packages on remote Debian host + Before doing so, installs the project's GPG key, writes a sources.list + file, and runs ``apt-get update``. + + :param ctx: the argparse.Namespace object + :param config: the config dict + :param remote: the teuthology.orchestra.remote.Remote object + :param debs: the Debian packages to be installed + :param branch: the branch of the project to be used + """ + # check for ceph release key + r = remote.run( + args=[ + 'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph', + ], + stdout=StringIO(), + check_status=False, + ) + if r.stdout.getvalue().find('Ceph automated package') == -1: + # if it doesn't exist, add it + remote.run( + args=[ + 'wget', '-q', '-O-', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', # noqa + run.Raw('|'), + 'sudo', 'apt-key', 'add', '-', + ], + stdout=StringIO(), + ) + + builder = _get_builder_project(ctx, remote, config) + base_url = builder.base_url + log.info('Pulling from %s', base_url) + + version = builder.version + log.info('Package version is %s', version) + + builder.install_repo() + + remote.run(args=['sudo', 'apt-get', 'update'], check_status=False) + install_dep_packages(remote, + args=[ + 'sudo', + 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes', + '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw( + 'Dpkg::Options::="--force-confold"'), + 'install', + ] + ['%s=%s' % (d, version) for d in debs], + ) diff --git a/teuthology/task/install/packages.yaml b/teuthology/task/install/packages.yaml new file mode 100644 index 0000000000..1e8916ae51 --- /dev/null +++ b/teuthology/task/install/packages.yaml @@ -0,0 +1,37 @@ +--- +ceph: + deb: + - ceph + - ceph-mds + - ceph-common + - ceph-fuse + - ceph-test + - radosgw + - python-ceph + - libcephfs1 + - libcephfs-java + - libcephfs-jni + - librados2 + - librbd1 + - rbd-fuse + - ceph-dbg + - ceph-mds-dbg + - ceph-common-dbg + - ceph-fuse-dbg + - radosgw-dbg + - 
libcephfs1-dbg + - librados2-dbg + - librbd1-dbg + rpm: + - ceph-radosgw + - ceph-test + - ceph + - ceph-fuse + - cephfs-java + - libcephfs_jni1 + - libcephfs1 + - librados2 + - librbd1 + - python-ceph + - rbd-fuse + - ceph-debuginfo diff --git a/teuthology/task/install/redhat.py b/teuthology/task/install/redhat.py new file mode 100644 index 0000000000..5118088655 --- /dev/null +++ b/teuthology/task/install/redhat.py @@ -0,0 +1,217 @@ +import contextlib +import logging +import yaml +import os + +from teuthology import packaging +from teuthology.orchestra import run +from teuthology.parallel import parallel +from teuthology.config import config as teuth_config + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def install(ctx, config): + """ + Installs rh ceph on all hosts in ctx. + + :param ctx: the argparse.Namespace object + :param config: the config dict + + uses yaml defined in qa suite or in users + home dir to check for supported versions and + packages to install. + + the format of yaml is: + versions: + supported: + - '1.3.0' + rpm: + mapped: + '1.3.0' : '0.94.1' + deb: + mapped: + '1.3.0' : '0.94.1' + pkgs: + rpm: + - ceph-mon + - ceph-osd + deb: + - ceph-osd + - ceph-mds + """ + # Look for rh specific packages + ds_yaml = os.path.join( + teuth_config.get('ds_yaml_dir'), + config.get('rhbuild') + ".yaml", + ) + if not os.path.exists(ds_yaml): + raise FileNotFoundError(f'Downstream rh version yaml file missing: {ds_yaml}') + log.info("using yaml path %s", ds_yaml) + downstream_config = yaml.safe_load(open(ds_yaml)) + rh_versions = downstream_config.get('versions', dict()).get('supported', []) + external_config = dict(extra_system_packages=config.get('extra_system_packages'), + extra_packages=config.get('extra_packages'), + ) + downstream_config.update(external_config) + version = config.get('rhbuild') + if version in rh_versions: + log.info("%s is a supported version", version) + else: + raise RuntimeError("Unsupported RH Ceph version %s", version) + with parallel() as p: + for remote in ctx.cluster.remotes.keys(): + if remote.os.name == 'rhel': + log.info("Installing on RHEL node: %s", remote.shortname) + p.spawn(install_pkgs, ctx, remote, version, downstream_config) + else: + log.info("Install on Ubuntu node: %s", remote.shortname) + p.spawn(install_deb_pkgs, ctx, remote, version, + downstream_config) + try: + yield + finally: + if config.get('skip_uninstall'): + log.info("Skipping uninstall of Ceph") + else: + with parallel() as p: + for remote in ctx.cluster.remotes.keys(): + p.spawn(uninstall_pkgs, ctx, remote, downstream_config) + + +def install_pkgs(ctx, remote, version, downstream_config): + """ + Installs RH build using ceph-deploy. 
+ + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param downstream_config the dict object that has downstream pkg info + """ + rh_version_check = downstream_config.get('versions').get('rpm').get('mapped') + rh_rpm_pkgs = downstream_config.get('pkgs').get('rpm') + extras = [downstream_config.get('extra_system_packages'), + downstream_config.get('extra_packages')] + for extra in extras: + if isinstance(extra, dict): + rh_rpm_pkgs += extra.get('rpm', []) + elif isinstance(extra, list): + rh_rpm_pkgs += extra + pkgs = str.join(' ', rh_rpm_pkgs) + + log.info("Remove any epel packages installed on node %s", remote.shortname) + # below packages can come from epel and still work, ensure we use cdn pkgs + remote.run( + args=[ + 'sudo', + 'yum', + 'remove', + run.Raw("leveldb xmlstarlet fcgi"), + '-y'], + check_status=False) + + log.info("Installing redhat ceph packages") + remote.run(args=['sudo', 'yum', '-y', 'install', + run.Raw(pkgs)]) + # check package version + installed_version = packaging.get_package_version(remote, 'ceph-common') + log.info( + "Node: {n} Ceph version installed is {v}".format( + n=remote.shortname, + v=version)) + req_ver = rh_version_check[version] + if installed_version.startswith(req_ver): + log.info("Installed version matches on %s", remote.shortname) + else: + raise RuntimeError("Version check failed on node %s", remote.shortname) + + +def set_deb_repo(remote, deb_repo, deb_gpg_key=None): + """ + Sets up debian repo and gpg key for package verification + :param remote - remote node object + :param deb_repo - debian repo root path + :param deb_gpg_key - gpg key for the package + """ + repos = ['MON', 'OSD', 'Tools'] + log.info("deb repo: %s", deb_repo) + log.info("gpg key url: %s", deb_gpg_key) + # remove any additional repo so that upstream packages are not used + # all required packages come from downstream repo + remote.run(args=['sudo', 'rm', '-f', run.Raw('/etc/apt/sources.list.d/*')], + check_status=False) + for repo in repos: + cmd = 'echo deb {root}/{repo} $(lsb_release -sc) main'.format( + root=deb_repo, repo=repo) + remote.run(args=['sudo', run.Raw(cmd), run.Raw('>'), + "/tmp/{0}.list".format(repo)]) + remote.run(args=['sudo', 'cp', "/tmp/{0}.list".format(repo), + '/etc/apt/sources.list.d/']) + # add ds gpgkey + ds_keys = ['https://www.redhat.com/security/897da07a.txt', + 'https://www.redhat.com/security/f21541eb.txt'] + if deb_gpg_key is not None: + ds_keys.append(deb_gpg_key) + for key in ds_keys: + wget_cmd = 'wget -O - ' + key + remote.run(args=['sudo', run.Raw(wget_cmd), + run.Raw('|'), 'sudo', 'apt-key', 'add', run.Raw('-')]) + remote.run(args=['sudo', 'apt-get', 'update']) + + +def install_deb_pkgs( + ctx, + remote, + version, + downstream_config): + """ + Setup debian repo, Install gpg key + and Install on debian packages + : param ctx + : param remote + : param downstream_config the dict object that has downstream pkg info + """ + rh_version_check = downstream_config.get('versions').get('deb').get('mapped') + rh_deb_pkgs = downstream_config.get('pkgs').get('deb') + extras = [downstream_config.get('extra_system_packages'), + downstream_config.get('extra_packages')] + for extra in extras: + if isinstance(extra, dict): + rh_deb_pkgs += extra.get('deb', []) + elif isinstance(extra, list): + rh_deb_pkgs += extra + pkgs = str.join(' ', rh_deb_pkgs) + log.info("Installing redhat ceph packages") + remote.run(args=['sudo', 'apt-get', '-y', 'install', + run.Raw(pkgs)]) + # check package version + 
installed_version = packaging.get_package_version(remote, 'ceph-common') + log.info( + "Node: {n} Ceph version installed is {v}".format( + n=remote.shortname, + v=version)) + req_ver = rh_version_check[version] + if installed_version.startswith(req_ver): + log.info("Installed version matches on %s", remote.shortname) + else: + raise RuntimeError("Version check failed on node %s", remote.shortname) + + +def uninstall_pkgs(ctx, remote, downstream_config): + """ + Removes Ceph from all RH hosts + + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param downstream_config the dict object that has downstream pkg info + """ + + if remote.os.name == 'rhel': + pkgs = downstream_config.get('pkgs').get('rpm') + if pkgs: + remote.sh(['sudo', 'yum', 'remove'] + pkgs + ['-y']) + else: + pkgs = downstream_config.get('pkgs').get('deb') + if pkgs: + remote.sh(['sudo', 'apt-get', 'remove'] + pkgs + ['-y']) + remote.run(args=['sudo', 'rm', '-rf', '/var/lib/ceph']) diff --git a/teuthology/task/install/rpm.py b/teuthology/task/install/rpm.py new file mode 100644 index 0000000000..f11fd8bc6e --- /dev/null +++ b/teuthology/task/install/rpm.py @@ -0,0 +1,432 @@ +import logging +import os.path +from io import StringIO + +from distutils.version import LooseVersion + +from teuthology.config import config as teuth_config +from teuthology.contextutil import safe_while +from teuthology.orchestra import run +from teuthology import packaging + +from teuthology.task.install.util import _get_builder_project, _get_local_dir + +log = logging.getLogger(__name__) + + +def _remove(ctx, config, remote, rpm): + """ + Removes RPM packages from remote + + :param ctx: the argparse.Namespace object + :param config: the config dict + :param remote: the teuthology.orchestra.remote.Remote object + :param rpm: list of packages names to remove + """ + remote_os = remote.os + dist_release = remote_os.name + + install_ceph_packages = config.get('install_ceph_packages') + if install_ceph_packages: + log.info("Removing packages: {pkglist} on rpm system.".format( + pkglist=", ".join(rpm))) + if dist_release in ['opensuse', 'sle']: + remote.run(args=''' + for d in {rpms} ; do + sudo zypper -n --no-gpg-checks remove --capability $d || true + done'''.format(rpms=' '.join(rpm))) + remote.run(args='sudo zypper clean -a') + else: + remote.run(args=''' + for d in {rpms} ; do + sudo yum -y remove $d || true + done'''.format(rpms=' '.join(rpm))) + remote.run(args='sudo yum clean all') + else: + log.info("install task did not install any packages, " + "so not removing any, either") + + repos = config.get('repos') + if repos: + if dist_release in ['opensuse', 'sle']: + _zypper_removerepo(remote, repos) + else: + raise Exception('Custom repos were specified for %s ' % remote_os + + 'but these are currently not supported') + else: + builder = _get_builder_project(ctx, remote, config) + builder.remove_repo() + + if dist_release in ['opensuse', 'sle']: + #remote.run(args='sudo zypper clean -a') + log.info("Not cleaning zypper cache: this might fail, and is not needed " + "because the test machine will be destroyed or reimaged anyway") + else: + remote.run(args='sudo yum clean expire-cache') + + +def _zypper_addrepo(remote, repo_list): + """ + Add zypper repos to the remote system. 
+ + :param remote: remote node where to add packages + :param repo_list: list of dictionaries with keys 'name', 'url' + :return: + """ + for repo in repo_list: + if 'priority' in repo: + remote.run(args=[ + 'sudo', 'zypper', '-n', 'addrepo', '--refresh', '--no-gpgcheck', + '-p', str(repo['priority']), repo['url'], repo['name'], + ]) + else: + remote.run(args=[ + 'sudo', 'zypper', '-n', 'addrepo', '--refresh', '--no-gpgcheck', + repo['url'], repo['name'], + ]) + # Because 'zypper addrepo --check' does not work as expected + # we need to call zypper ref in order to fail early if the repo + # is invalid + remote.run(args='sudo zypper ref ' + repo['name']) + +def _zypper_removerepo(remote, repo_list): + """ + Remove zypper repos on the remote system. + + :param remote: remote node where to remove packages from + :param repo_list: list of dictionaries with keys 'name', 'url' + :return: + """ + for repo in repo_list: + remote.run(args=[ + 'sudo', 'zypper', '-n', 'removerepo', repo['name'], + ]) + +def _zypper_wipe_all_repos(remote): + """ + Completely "wipe" (remove) all zypper repos + + :param remote: remote node where to wipe zypper repos + :return: + """ + log.info("Wiping zypper repos (if any)") + remote.sh('sudo zypper repos -upEP && ' + 'sudo rm -f /etc/zypp/repos.d/* || ' + 'true') + +def _downgrade_packages(ctx, remote, pkgs, pkg_version, config): + """ + Downgrade packages listed by 'downgrade_packages' + + Downgrade specified packages to given version. The list of packages to + downgrade is provided by 'downgrade_packages' as a property of the "install" + task. + + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param pkgs: list of package names to install + :param pkg_version: the version to which all packages will be downgraded + :param config: the config dict + :return: list of package names from 'pkgs' which are not yet + installed/downgraded + """ + downgrade_pkgs = config.get('downgrade_packages', []) + if not downgrade_pkgs: + return pkgs + log.info('Downgrading packages: {pkglist}'.format( + pkglist=', '.join(downgrade_pkgs))) + # assuming we are going to downgrade packages with the same version + first_pkg = downgrade_pkgs[0] + installed_version = packaging.get_package_version(remote, first_pkg) + assert installed_version, "failed to get version of {}".format(first_pkg) + assert LooseVersion(installed_version) > LooseVersion(pkg_version) + # to compose package name like "librados2-0.94.10-87.g116a558.el7" + pkgs_opt = ['-'.join([pkg, pkg_version]) for pkg in downgrade_pkgs] + remote.run(args='sudo yum -y downgrade {}'.format(' '.join(pkgs_opt))) + return [pkg for pkg in pkgs if pkg not in downgrade_pkgs] + +def _retry_if_failures_are_recoverable(remote, args): + # wait at most 5 minutes + with safe_while(sleep=10, tries=30) as proceed: + while proceed(): + stdout = StringIO() + stderr = StringIO() + try: + return remote.run(args=args, stderr=stderr, stdout=stdout) + except run.CommandFailedError: + if "status code: 503" in stdout.getvalue().lower(): + continue + if "failed to download metadata for repo" in stderr.getvalue().lower(): + continue + else: + raise + +def _update_package_list_and_install(ctx, remote, rpm, config): + """ + Installs the repository for the relevant branch, then installs + the requested packages on the remote system. + + TODO: split this into at least two functions. 
+ + :param ctx: the argparse.Namespace object + :param remote: the teuthology.orchestra.remote.Remote object + :param rpm: list of packages names to install + :param config: the config dict + """ + # rpm does not force installation of a particular version of the project + # packages, so we can put extra_system_packages together with the rest + system_pkglist = config.get('extra_system_packages') + if system_pkglist: + if isinstance(system_pkglist, dict): + rpm += system_pkglist.get('rpm') + else: + rpm += system_pkglist + remote_os = remote.os + + dist_release = remote_os.name + log.debug("_update_package_list_and_install: config is {}".format(config)) + repos = config.get('repos') + install_ceph_packages = config.get('install_ceph_packages') + repos_only = config.get('repos_only') + + if repos: + log.debug("Adding repos: %s" % repos) + if dist_release in ['opensuse', 'sle']: + _zypper_wipe_all_repos(remote) + _zypper_addrepo(remote, repos) + else: + raise Exception('Custom repos were specified for %s ' % remote_os + + 'but these are currently not supported') + else: + builder = _get_builder_project(ctx, remote, config) + log.info('Pulling from %s', builder.base_url) + log.info('Package version is %s', builder.version) + builder.install_repo() + + if repos_only: + log.info("repos_only was specified: not installing any packages") + return None + + if not install_ceph_packages: + log.info("install_ceph_packages set to False: not installing Ceph packages") + # Although "librados2" is an indirect dependency of ceph-test, we + # install it separately because, otherwise, ceph-test cannot be + # installed (even with --force) when there are several conflicting + # repos from different vendors. + rpm = ["librados2", "ceph-test"] + + # rpm does not force installation of a particular version of the project + # packages, so we can put extra_system_packages together with the rest + system_pkglist = config.get('extra_system_packages', []) + if system_pkglist: + if isinstance(system_pkglist, dict): + rpm += system_pkglist.get('rpm') + else: + rpm += system_pkglist + + log.info("Installing packages: {pkglist} on remote rpm {arch}".format( + pkglist=", ".join(rpm), arch=remote.arch)) + + if dist_release not in ['opensuse', 'sle']: + project = builder.project + uri = builder.uri_reference + _yum_fix_repo_priority(remote, project, uri) + _yum_fix_repo_host(remote, project) + _yum_set_check_obsoletes(remote) + + if dist_release in ['opensuse', 'sle']: + remote.run(args='sudo zypper clean -a') + else: + remote.run(args='sudo yum clean all') + + ldir = _get_local_dir(config, remote) + + if dist_release in ['opensuse', 'sle']: + remove_cmd = 'sudo zypper -n remove --capability' + # NOTE: --capability contradicts --force + install_cmd = 'sudo zypper -n --no-gpg-checks install --force --no-recommends' + else: + remove_cmd = 'sudo yum -y remove' + install_cmd = 'sudo yum -y install' + # to compose version string like "0.94.10-87.g116a558.el7" + pkg_version = '.'.join([builder.version, builder.dist_release]) + rpm = _downgrade_packages(ctx, remote, rpm, pkg_version, config) + + if system_pkglist: + _retry_if_failures_are_recoverable(remote, + args='{install_cmd} {rpms}' + .format(install_cmd=install_cmd, rpms=' '.join(rpm)) + ) + else: + for cpack in rpm: + if ldir: + _retry_if_failures_are_recoverable(remote, + args=''' + if test -e {pkg} ; then + {remove_cmd} {pkg} ; + {install_cmd} {pkg} ; + else + {install_cmd} {cpack} ; + fi + '''.format(remove_cmd=remove_cmd, + install_cmd=install_cmd, + pkg=os.path.join(ldir, 
cpack), + cpack=cpack)) + else: + _retry_if_failures_are_recoverable(remote, + args='{install_cmd} {cpack}' + .format(install_cmd=install_cmd, cpack=cpack) + ) + +def _yum_fix_repo_priority(remote, project, uri): + """ + On the remote, 'priority=1' lines to each enabled repo in: + + /etc/yum.repos.d/{project}.repo + + :param remote: the teuthology.orchestra.remote.Remote object + :param project: the project whose repos need modification + """ + repo_path = '/etc/yum.repos.d/%s.repo' % project + remote.run( + args=[ + 'if', 'test', '-f', repo_path, run.Raw(';'), 'then', + 'sudo', 'sed', '-i', '-e', + run.Raw('\':a;N;$!ba;s/enabled=1\\ngpg/enabled=1\\npriority=1\\ngpg/g\''), + '-e', + run.Raw("'s;ref/[a-zA-Z0-9_-]*/;{uri}/;g'".format(uri=uri)), + repo_path, run.Raw(';'), 'fi' + ] + ) + + +def _yum_fix_repo_host(remote, project): + """ + Update the hostname to reflect the gitbuilder_host setting. + """ + # Skip this bit if we're not using gitbuilder + if not isinstance(packaging.get_builder_project(), + packaging.GitbuilderProject): + return + old_host = teuth_config._defaults['gitbuilder_host'] + new_host = teuth_config.gitbuilder_host + if new_host == old_host: + return + repo_path = '/etc/yum.repos.d/%s.repo' % project + host_sed_expr = "'s/{0}/{1}/'".format(old_host, new_host) + remote.run( + args=[ + 'if', 'test', '-f', repo_path, run.Raw(';'), 'then', + 'sudo', 'sed', '-i', '-e', run.Raw(host_sed_expr), + repo_path, run.Raw(';'), 'fi'] + ) + + +def _yum_set_check_obsoletes(remote): + """ + Set check_obsoletes = 1 in /etc/yum/pluginconf.d/priorities.conf + + Creates a backup at /etc/yum/pluginconf.d/priorities.conf.orig so we can + restore later. + """ + conf_path = '/etc/yum/pluginconf.d/priorities.conf' + conf_path_orig = conf_path + '.orig' + cmd = [ + 'sudo', 'touch', '-a', '/etc/yum/pluginconf.d/priorities.conf', run.Raw(';'), + 'test', '-e', conf_path_orig, run.Raw('||'), 'sudo', 'cp', '-af', + conf_path, conf_path_orig, + ] + remote.run(args=cmd) + cmd = [ + 'grep', 'check_obsoletes', conf_path, run.Raw('&&'), 'sudo', 'sed', + '-i', 's/check_obsoletes.*0/check_obsoletes = 1/g', conf_path, + run.Raw('||'), 'echo', 'check_obsoletes = 1', run.Raw('|'), 'sudo', + 'tee', '-a', conf_path, + ] + remote.run(args=cmd) + + +def _yum_unset_check_obsoletes(remote): + """ + Restore the /etc/yum/pluginconf.d/priorities.conf backup + """ + conf_path = '/etc/yum/pluginconf.d/priorities.conf' + conf_path_orig = conf_path + '.orig' + remote.run(args=['sudo', 'mv', '-f', conf_path_orig, conf_path], + check_status=False) + + +def _remove_sources_list(ctx, config, remote): + """ + Removes /etc/yum.repos.d/{proj}.repo + + :param remote: the teuthology.orchestra.remote.Remote object + :param proj: the project whose .repo needs removing + """ + builder = _get_builder_project(ctx, remote, config) + builder.remove_repo() + if remote.os.name not in ['opensuse', 'sle']: + _yum_unset_check_obsoletes(remote) + + +def _upgrade_packages(ctx, config, remote, pkgs): + """ + Upgrade project's packages on remote RPM-based host + Before doing so, it makes sure the project's repository is installed - + removing any previous version first. 
+ + :param ctx: the argparse.Namespace object + :param config: the config dict + :param remote: the teuthology.orchestra.remote.Remote object + :param pkgs: the RPM packages to be installed + :param branch: the branch of the project to be used + """ + builder = _get_builder_project(ctx, remote, config) + log.info( + "Host {host} is: {distro} {ver} {arch}".format( + host=remote.shortname, + distro=builder.os_type, + ver=builder.os_version, + arch=builder.arch,) + ) + + base_url = builder.base_url + log.info('Repo base URL: %s', base_url) + project = builder.project + + # Remove the repository before re-adding it + builder.remove_repo() + builder.install_repo() + + if builder.dist_release not in ['opensuse', 'sle']: + uri = builder.uri_reference + _yum_fix_repo_priority(remote, project, uri) + _yum_fix_repo_host(remote, project) + _yum_set_check_obsoletes(remote) + + if builder.dist_release in ['opensuse', 'sle']: + pkg_mng_cmd = 'zypper' + pkg_mng_opts = '-a' + else: + pkg_mng_cmd = 'yum' + pkg_mng_opts = 'all' + + remote.run( + args=[ + 'sudo', pkg_mng_cmd, 'clean', pkg_mng_opts, + ]) + + # Actually upgrade the project packages + if builder.dist_release in ['opensuse', 'sle']: + pkg_mng_opts = '-n' + pkg_mng_subcommand = 'install' + pkg_mng_subcommand_opts = ['--capability', '--no-recommends'] + else: + pkg_mng_opts = '-y' + pkg_mng_subcommand = 'upgrade' + pkg_mng_subcommand_opts = [] + args = ['sudo', pkg_mng_cmd, pkg_mng_opts, pkg_mng_subcommand] + if pkg_mng_subcommand_opts: + args += pkg_mng_subcommand_opts + args += pkgs + remote.run(args=args) diff --git a/teuthology/task/install/util.py b/teuthology/task/install/util.py new file mode 100644 index 0000000000..0fec21a613 --- /dev/null +++ b/teuthology/task/install/util.py @@ -0,0 +1,153 @@ +import contextlib +import logging +import os + +from teuthology import misc as teuthology +from teuthology import packaging +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +def _get_builder_project(ctx, remote, config): + return packaging.get_builder_project()( + config.get('project', 'ceph'), + config, + remote=remote, + ctx=ctx + ) + + +def _get_local_dir(config, remote): + """ + Extract local directory name from the task lists. + Copy files over to the remote site. + """ + ldir = config.get('local', None) + if ldir: + remote.run(args=['sudo', 'mkdir', '-p', ldir]) + for fyle in os.listdir(ldir): + fname = "%s/%s" % (ldir, fyle) + teuthology.sudo_write_file( + remote, fname, open(fname).read(), '644') + return ldir + + +def get_flavor(config): + """ + Determine the flavor to use. + """ + config = config or dict() + flavor = config.get('flavor', 'default') + + if config.get('path'): + # local dir precludes any other flavors + flavor = 'local' + else: + if config.get('valgrind'): + flavor = 'notcmalloc' + else: + if config.get('coverage'): + flavor = 'gcov' + return flavor + +def _ship_utilities(ctx): + """ + Write a copy of valgrind.supp to each of the remote sites. Set executables + used by Ceph in /usr/local/bin. When finished (upon exit of the teuthology + run), remove these files. 
+ + :param ctx: Context + """ + testdir = teuthology.get_testdir(ctx) + filenames = [] + + log.info('Shipping valgrind.supp...') + assert 'suite_path' in ctx.config + try: + with open( + os.path.join(ctx.config['suite_path'], 'valgrind.supp'), + 'rb' + ) as f: + fn = os.path.join(testdir, 'valgrind.supp') + filenames.append(fn) + for rem in ctx.cluster.remotes.keys(): + teuthology.sudo_write_file( + remote=rem, + path=fn, + data=f, + ) + f.seek(0) + except IOError as e: + log.info('Cannot ship supression file for valgrind: %s...', e.strerror) + + FILES = ['daemon-helper', 'adjust-ulimits'] + destdir = '/usr/bin' + for filename in FILES: + log.info('Shipping %r...', filename) + src = os.path.join(os.path.dirname(__file__), filename) + dst = os.path.join(destdir, filename) + filenames.append(dst) + with open(src, 'rb') as f: + for rem in ctx.cluster.remotes.keys(): + teuthology.sudo_write_file( + remote=rem, + path=dst, + data=f, + ) + f.seek(0) + rem.run( + args=[ + 'sudo', + 'chmod', + 'a=rx', + '--', + dst, + ], + ) + return filenames + +def _remove_utilities(ctx, filenames): + """ + Remove the shipped utilities. + + :param ctx: Context + :param filenames: The utilities install paths + """ + log.info('Removing shipped files: %s...', ' '.join(filenames)) + if filenames == []: + return + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'rm', + '-f', + '--', + ] + list(filenames), + wait=False, + ), + ) + +@contextlib.contextmanager +def ship_utilities(ctx, config): + """ + Ship utilities during the first call, and skip it in the following ones. + See also `_ship_utilities`. + + :param ctx: Context + :param config: Configuration + """ + assert config is None + + do_ship_utilities = ctx.get('do_ship_utilities', True) + if do_ship_utilities: + ctx['do_ship_utilities'] = False + filenames = _ship_utilities(ctx) + try: + yield + finally: + _remove_utilities(ctx, filenames) + else: + log.info('Utilities already shipped, skip it...') + yield diff --git a/teuthology/task/interactive.py b/teuthology/task/interactive.py new file mode 100644 index 0000000000..dd1676e49a --- /dev/null +++ b/teuthology/task/interactive.py @@ -0,0 +1,40 @@ +""" +Drop into a python shell +""" +import code +import readline +import rlcompleter +rlcompleter.__name__ # silence pyflakes +import pprint + +readline.parse_and_bind('tab: complete') + +def task(ctx, config): + """ + Run an interactive Python shell, with the cluster accessible via + the ``ctx`` variable. + + Hit ``control-D`` to continue. + + This is also useful to pause the execution of the test between two + tasks, either to perform ad hoc operations, or to examine the + state of the cluster. You can also use it to easily bring up a + Ceph cluster for ad hoc testing. + + For example:: + + tasks: + - ceph: + - interactive: + """ + + pp = pprint.PrettyPrinter().pprint + code.interact( + banner='Ceph test interactive mode, use ctx to interact with the cluster, press control-D to exit...', + # TODO simplify this + local=dict( + ctx=ctx, + config=config, + pp=pp, + ), + ) diff --git a/teuthology/task/internal/__init__.py b/teuthology/task/internal/__init__.py new file mode 100644 index 0000000000..d85b3f212e --- /dev/null +++ b/teuthology/task/internal/__init__.py @@ -0,0 +1,527 @@ +""" +Internal tasks are tasks that are started from the teuthology infrastructure. +Note that there is no corresponding task defined for this module. 
All of +the calls are made from other modules, most notably teuthology/run.py +""" +import contextlib +import functools +import gzip +import logging +import os +import shutil +import time +import yaml +import subprocess + +import humanfriendly + +import teuthology.lock.ops +from teuthology import misc +from teuthology.packaging import get_builder_project +from teuthology import report +from teuthology.config import config as teuth_config +from teuthology.exceptions import ConfigError, VersionNotFoundError +from teuthology.job_status import get_status, set_status +from teuthology.orchestra import cluster, remote, run +# the below import with noqa is to workaround run.py which does not support multilevel submodule import +from teuthology.task.internal.redhat import (setup_cdn_repo, setup_base_repo, # noqa + setup_additional_repo, # noqa + setup_stage_cdn, setup_container_registry) # noqa + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def base(ctx, config): + """ + Create the test directory that we will be using on the remote system + """ + log.info('Creating test directory...') + testdir = misc.get_testdir(ctx) + run.wait( + ctx.cluster.run( + args=['mkdir', '-p', '-m0755', '--', testdir], + wait=False, + ) + ) + try: + yield + finally: + log.info('Tidying up after the test...') + # if this fails, one of the earlier cleanups is flawed; don't + # just cram an rm -rf here + run.wait( + ctx.cluster.run( + args=['find', testdir, '-ls', + run.Raw(';'), + 'rmdir', '--', testdir], + wait=False, + ), + ) + + +def save_config(ctx, config): + """ + Store the config in a yaml file + """ + log.info('Saving configuration') + if ctx.archive is not None: + with open(os.path.join(ctx.archive, 'config.yaml'), 'w') as f: + yaml.safe_dump(ctx.config, f, default_flow_style=False) + + +def check_packages(ctx, config): + """ + Checks gitbuilder to determine if there are missing packages for this job. + + If there are missing packages, fail the job. + """ + for task in ctx.config['tasks']: + if list(task.keys())[0] == 'buildpackages': + log.info("Checking packages skipped because " + "the task buildpackages was found.") + return + + log.info("Checking packages...") + os_type = ctx.config.get("os_type") + sha1 = ctx.config.get("sha1") + # We can only do this check if there are a defined sha1 and os_type + # in the job config. 
+ if os_type and sha1: + package = get_builder_project()("ceph", ctx.config) + template = "Checking packages for os_type '{os}', " \ + "flavor '{flav}' and ceph hash '{ver}'" + log.info( + template.format( + os=package.os_type, + flav=package.flavor, + ver=package.sha1, + ) + ) + if package.version: + log.info("Found packages for ceph version {ver}".format( + ver=package.version + )) + else: + msg = "Packages for distro '{d}' and ceph hash '{ver}' not found" + msg = msg.format( + d=package.distro, + ver=package.sha1, + ) + log.error(msg) + # set the failure message and update paddles with the status + ctx.summary["failure_reason"] = msg + set_status(ctx.summary, "dead") + report.try_push_job_info(ctx.config, dict(status='dead')) + raise VersionNotFoundError(package.base_url) + else: + log.info( + "Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format( + os=os_type, + ver=sha1, + ) + ) + + +@contextlib.contextmanager +def timer(ctx, config): + """ + Start the timer used by teuthology + """ + log.info('Starting timer...') + start = time.time() + try: + yield + finally: + duration = time.time() - start + log.info('Duration was %f seconds', duration) + ctx.summary['duration'] = duration + + +def add_remotes(ctx, config): + """ + Create a ctx.cluster object populated with remotes mapped to roles + """ + ctx.cluster = cluster.Cluster() + # Allow jobs to run without using nodes, for self-testing + if 'roles' not in ctx.config and 'targets' not in ctx.config: + return + remotes = [] + machs = [] + for name in ctx.config['targets'].keys(): + machs.append(name) + for t, key in ctx.config['targets'].items(): + t = misc.canonicalize_hostname(t) + try: + if ctx.config['sshkeys'] == 'ignore': + key = None + except (AttributeError, KeyError): + pass + rem = remote.Remote(name=t, host_key=key, keep_alive=True) + remotes.append(rem) + if 'roles' in ctx.config: + for rem, roles in zip(remotes, ctx.config['roles']): + assert all(isinstance(role, str) for role in roles), \ + "Roles in config must be strings: %r" % roles + ctx.cluster.add(rem, roles) + log.info('roles: %s - %s' % (rem, roles)) + else: + for rem in remotes: + ctx.cluster.add(rem, rem.name) + + +def connect(ctx, config): + """ + Connect to all remotes in ctx.cluster + """ + log.info('Opening connections...') + for rem in ctx.cluster.remotes.keys(): + log.debug('connecting to %s', rem.name) + rem.connect() + + +def push_inventory(ctx, config): + if not teuth_config.lock_server: + return + + def push(): + for rem in ctx.cluster.remotes.keys(): + info = rem.inventory_info + teuthology.lock.ops.update_inventory(info) + try: + push() + except Exception: + log.exception("Error pushing inventory") + +BUILDPACKAGES_FIRST = 0 +BUILDPACKAGES_OK = 1 +BUILDPACKAGES_REMOVED = 2 +BUILDPACKAGES_NOTHING = 3 + +def buildpackages_prep(ctx, config): + """ + Make sure the 'buildpackages' task happens before + the 'install' task. 
+ + Return: + + BUILDPACKAGES_NOTHING if there is no buildpackages task + BUILDPACKAGES_REMOVED if there is a buildpackages task but no install task + BUILDPACKAGES_FIRST if a buildpackages task was moved at the beginning + BUILDPACKAGES_OK if a buildpackages task already at the beginning + """ + index = 0 + install_index = None + buildpackages_index = None + buildpackages_prep_index = None + for task in ctx.config['tasks']: + t = list(task)[0] + if t == 'install': + install_index = index + if t == 'buildpackages': + buildpackages_index = index + if t == 'internal.buildpackages_prep': + buildpackages_prep_index = index + index += 1 + if (buildpackages_index is not None and + install_index is not None): + if buildpackages_index > buildpackages_prep_index + 1: + log.info('buildpackages moved to be the first task') + buildpackages = ctx.config['tasks'].pop(buildpackages_index) + ctx.config['tasks'].insert(buildpackages_prep_index + 1, + buildpackages) + return BUILDPACKAGES_FIRST + else: + log.info('buildpackages is already the first task') + return BUILDPACKAGES_OK + elif buildpackages_index is not None and install_index is None: + ctx.config['tasks'].pop(buildpackages_index) + all_tasks = [list(x.keys())[0] for x in ctx.config['tasks']] + log.info('buildpackages removed because no install task found in ' + + str(all_tasks)) + return BUILDPACKAGES_REMOVED + elif buildpackages_index is None: + log.info('no buildpackages task found') + return BUILDPACKAGES_NOTHING + + +def serialize_remote_roles(ctx, config): + """ + Provides an explicit mapping for which remotes have been assigned what roles + So that other software can be loosely coupled to teuthology + """ + if ctx.archive is not None: + with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file: + info_yaml = yaml.safe_load(info_file) + info_file.seek(0) + info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.items()]) + yaml.safe_dump(info_yaml, info_file, default_flow_style=False) + + +def check_ceph_data(ctx, config): + """ + Check for old /var/lib/ceph subdirectories and detect staleness. + """ + log.info('Checking for non-empty /var/lib/ceph...') + processes = ctx.cluster.run( + args='test -z $(ls -A /var/lib/ceph)', + wait=False, + ) + failed = False + for proc in processes: + try: + proc.wait() + except run.CommandFailedError: + log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname) + failed = True + if failed: + raise RuntimeError('Stale /var/lib/ceph detected, aborting.') + + +def check_conflict(ctx, config): + """ + Note directory use conflicts and stale directories. 
+ """ + log.info('Checking for old test directory...') + testdir = misc.get_testdir(ctx) + processes = ctx.cluster.run( + args=['test', '!', '-e', testdir], + wait=False, + ) + failed = False + for proc in processes: + try: + proc.wait() + except run.CommandFailedError: + log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir) + failed = True + if failed: + raise RuntimeError('Stale jobs detected, aborting.') + + +def fetch_binaries_for_coredumps(path, remote): + """ + Pul ELFs (debug and stripped) for each coredump found + """ + # Check for Coredumps: + coredump_path = os.path.join(path, 'coredump') + if os.path.isdir(coredump_path): + log.info('Transferring binaries for coredumps...') + for dump in os.listdir(coredump_path): + # Pull program from core file + dump_path = os.path.join(coredump_path, dump) + dump_info = subprocess.Popen(['file', dump_path], + stdout=subprocess.PIPE) + dump_out = dump_info.communicate()[0].decode() + + # Parse file output to get program, Example output: + # 1422917770.7450.core: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, \ + # from 'radosgw --rgw-socket-path /home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc' + log.info(f' core looks like: {dump_out}') + dump_program = dump_out.split("from '")[1].split(' ')[0] + + # Find path on remote server: + remote_path = remote.sh(['which', dump_program]).rstrip() + + # Pull remote program into coredump folder: + local_path = os.path.join(coredump_path, + dump_program.lstrip(os.path.sep)) + local_dir = os.path.dirname(local_path) + if not os.path.exists(local_dir): + os.makedirs(local_dir) + remote._sftp_get_file(remote_path, local_path) + + # Pull Debug symbols: + debug_path = os.path.join('/usr/lib/debug', remote_path) + + # RPM distro's append their non-stripped ELF's with .debug + # When deb based distro's do not. + if remote.system_type == 'rpm': + debug_path = '{debug_path}.debug'.format(debug_path=debug_path) + + remote.get_file(debug_path, coredump_path) + + +def gzip_if_too_large(compress_min_size, src, tarinfo, local_path): + if tarinfo.size >= compress_min_size: + with gzip.open(local_path + '.gz', 'wb') as dest: + shutil.copyfileobj(src, dest) + else: + misc.copy_fileobj(src, tarinfo, local_path) + + +@contextlib.contextmanager +def archive(ctx, config): + """ + Handle the creation and deletion of the archive directory. 
+ """ + log.info('Creating archive directory...') + archive_dir = misc.get_archive_dir(ctx) + run.wait( + ctx.cluster.run( + args=['install', '-d', '-m0755', '--', archive_dir], + wait=False, + ) + ) + + # Add logs directory to job's info log file + misc.add_remote_path(ctx, 'init', archive_dir) + + try: + yield + except Exception: + # we need to know this below + set_status(ctx.summary, 'fail') + raise + finally: + passed = get_status(ctx.summary) == 'pass' + if ctx.archive is not None and \ + not (ctx.config.get('archive-on-error') and passed): + log.info('Transferring archived files...') + logdir = os.path.join(ctx.archive, 'remote') + if (not os.path.exists(logdir)): + os.mkdir(logdir) + for rem in ctx.cluster.remotes.keys(): + path = os.path.join(logdir, rem.shortname) + min_size_option = ctx.config.get('log-compress-min-size', + '128MB') + try: + compress_min_size_bytes = \ + humanfriendly.parse_size(min_size_option) + except humanfriendly.InvalidSize: + msg = 'invalid "log-compress-min-size": {}'.format(min_size_option) + log.error(msg) + raise ConfigError(msg) + maybe_compress = functools.partial(gzip_if_too_large, + compress_min_size_bytes) + misc.pull_directory(rem, archive_dir, path, maybe_compress) + # Check for coredumps and pull binaries + fetch_binaries_for_coredumps(path, rem) + + log.info('Removing archive directory...') + run.wait( + ctx.cluster.run( + args=['rm', '-rf', '--', archive_dir], + wait=False, + ), + ) + + +@contextlib.contextmanager +def sudo(ctx, config): + """ + Enable use of sudo + """ + log.info('Configuring sudo...') + sudoers_file = '/etc/sudoers' + backup_ext = '.orig.teuthology' + tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g' + pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' + + run.wait( + ctx.cluster.run( + args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format( + ext=backup_ext, tty=tty_expr, pw=pw_expr, + path=sudoers_file + ), + wait=False, + ) + ) + try: + yield + finally: + log.info('Restoring {0}...'.format(sudoers_file)) + ctx.cluster.run( + args="sudo mv -f {path}{ext} {path}".format( + path=sudoers_file, ext=backup_ext + ) + ) + + +@contextlib.contextmanager +def coredump(ctx, config): + """ + Stash a coredump of this system if an error occurs. 
+ """ + log.info('Enabling coredump saving...') + cluster = ctx.cluster.filter(lambda r: not r.is_container) + archive_dir = misc.get_archive_dir(ctx) + run.wait( + cluster.run( + args=[ + 'install', '-d', '-m0755', '--', + '{adir}/coredump'.format(adir=archive_dir), + run.Raw('&&'), + 'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir), + run.Raw('&&'), + 'echo', + 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir), + run.Raw('|'), + 'sudo', 'tee', '-a', '/etc/sysctl.conf', + ], + wait=False, + ) + ) + + try: + yield + finally: + cluster = ctx.cluster.filter(lambda r: not r.is_container) + run.wait( + cluster.run( + args=[ + 'sudo', 'sysctl', '-w', 'kernel.core_pattern=core', + run.Raw('&&'), + 'sudo', 'bash', '-c', + (f'for f in `find {archive_dir}/coredump -type f`; do ' + 'file $f | grep -q systemd-sysusers && rm $f || true ; ' + 'done'), + run.Raw('&&'), + # don't litter the archive dir if there were no cores dumped + 'rmdir', + '--ignore-fail-on-non-empty', + '--', + '{adir}/coredump'.format(adir=archive_dir), + ], + wait=False, + ) + ) + + # set status = 'fail' if the dir is still there = coredumps were + # seen + for rem in cluster.remotes.keys(): + try: + rem.sh("test -e " + archive_dir + "/coredump") + except run.CommandFailedError: + continue + log.warning('Found coredumps on %s, flagging run as failed', rem) + set_status(ctx.summary, 'fail') + if 'failure_reason' not in ctx.summary: + ctx.summary['failure_reason'] = \ + 'Found coredumps on {rem}'.format(rem=rem) + + +@contextlib.contextmanager +def archive_upload(ctx, config): + """ + Upload the archive directory to a designated location + """ + try: + yield + finally: + upload = ctx.config.get('archive_upload') + archive_path = ctx.config.get('archive_path') + if upload and archive_path: + log.info('Uploading archives ...') + upload_key = ctx.config.get('archive_upload_key') + if upload_key: + ssh = "RSYNC_RSH='ssh -i " + upload_key + "'" + else: + ssh = '' + split_path = archive_path.split('/') + split_path.insert(-2, '.') + misc.sh(ssh + " rsync -avz --relative /" + + os.path.join(*split_path) + " " + + upload) + else: + log.info('Not uploading archives.') diff --git a/teuthology/task/internal/check_lock.py b/teuthology/task/internal/check_lock.py new file mode 100644 index 0000000000..152e41c2d9 --- /dev/null +++ b/teuthology/task/internal/check_lock.py @@ -0,0 +1,35 @@ +import logging + +import teuthology.lock.query +import teuthology.lock.util + +from teuthology.config import config as teuth_config + +log = logging.getLogger(__name__) + + +def check_lock(ctx, config, check_up=True): + """ + Check lock status of remote machines. 
+ """ + if not teuth_config.lock_server or ctx.config.get('check-locks') is False: + log.info('Lock checking disabled.') + return + log.info('Checking locks...') + for machine in ctx.config['targets'].keys(): + status = teuthology.lock.query.get_status(machine) + log.debug('machine status is %s', repr(status)) + assert status is not None, \ + 'could not read lock status for {name}'.format(name=machine) + if check_up: + assert status['up'], 'machine {name} is marked down'.format( + name=machine + ) + assert status['locked'], \ + 'machine {name} is not locked'.format(name=machine) + assert status['locked_by'] == ctx.owner, \ + 'machine {name} is locked by {user}, not {owner}'.format( + name=machine, + user=status['locked_by'], + owner=ctx.owner, + ) diff --git a/teuthology/task/internal/edit_sudoers.sh b/teuthology/task/internal/edit_sudoers.sh new file mode 100755 index 0000000000..6ab40a5d80 --- /dev/null +++ b/teuthology/task/internal/edit_sudoers.sh @@ -0,0 +1,10 @@ +#! /bin/sh + +sudo vi -e /etc/sudoers < # registry-name + """ + if ctx.config.get('redhat').get('setup_container_registry', None): + registry = ctx.config['redhat']['setup_container_registry'] + + # fetch credentials from teuth_config + creds = teuthconfig.get('registries', dict()).get(registry) + if not creds: + raise ConfigError("Registry not found....") + + # container-tool login + for remote in ctx.cluster.remotes.keys(): + container_tool = "podman" + if remote.os.version.startswith('7'): + container_tool = "docker" + + remote.run(args=[ + 'sudo', container_tool, + 'login', registry, + '--username', creds['username'], + '--password', creds['password'], + ] + ) + yield + +@contextlib.contextmanager +def setup_additional_repo(ctx, config): + """ + set additional repo's for testing + redhat: + set-add-repo: 'http://example.com/internal.repo' + """ + if ctx.config.get('redhat').get('set-add-repo', None): + add_repo = ctx.config.get('redhat').get('set-add-repo') + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type == 'rpm': + remote.run(args=['sudo', 'wget', '-O', '/etc/yum.repos.d/rh_add.repo', + add_repo]) + if not remote.os.version.startswith('8'): + remote.run(args=['sudo', 'yum', 'update', 'metadata']) + + yield + + +def _enable_rhel_repos(remote): + + # Look for rh specific repos + ds_yaml = os.path.join( + teuthconfig.get('ds_yaml_dir'), + teuthconfig.rhbuild + ".yaml" + ) + + rhel_repos = yaml.safe_load(open(ds_yaml)) + repos_to_subscribe = rhel_repos.get('rhel_repos').get(remote.os.version[0]) + + for repo in repos_to_subscribe: + remote.run(args=['sudo', 'subscription-manager', + 'repos', '--enable={r}'.format(r=repo)]) + + +@contextlib.contextmanager +def setup_base_repo(ctx, config): + """ + Setup repo based on redhat nodes + redhat: + base-repo-url: base url that provides Mon, OSD, Tools etc + installer-repo-url: Installer url that provides Agent, Installer + deb-repo-url: debian repo url + deb-gpg-key: gpg key used for signing the build + """ + rh_config = ctx.config.get('redhat') + if not rh_config.get('base-repo-url'): + # no repo defined + yield + if rh_config.get('set-cdn-repo'): + log.info("CDN repo already set, skipping rh repo") + yield + else: + _setup_latest_repo(ctx, rh_config) + try: + yield + finally: + log.info("Cleaning up repo's") + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type == 'rpm': + remote.run(args=['sudo', 'rm', + run.Raw('/etc/yum.repos.d/rh*.repo'), + ], check_status=False) + + +def _setup_latest_repo(ctx, config): + """ + Setup repo based on 
redhat nodes + """ + with parallel(): + for remote in ctx.cluster.remotes.keys(): + if remote.os.package_type == 'rpm': + # pre-cleanup + remote.run(args=['sudo', 'rm', run.Raw('/etc/yum.repos.d/rh*')], + check_status=False) + remote.run(args=['sudo', 'yum', 'clean', 'metadata']) + if not remote.os.version.startswith('8'): + remote.run(args=['sudo', 'yum', 'update', 'metadata']) + # skip is required for beta iso testing + if config.get('skip-subscription-manager', False) is True: + log.info("Skipping subscription-manager command") + else: + remote.run(args=['sudo', 'subscription-manager', 'repos', + run.Raw('--disable=*ceph*')], + check_status=False + ) + base_url = config.get('base-repo-url', '') + installer_url = config.get('installer-repo-url', '') + repos = ['MON', 'OSD', 'Tools', 'Calamari', 'Installer'] + installer_repos = ['Agent', 'Main', 'Installer'] + if config.get('base-rh-repos'): + repos = ctx.config.get('base-rh-repos') + if config.get('installer-repos'): + installer_repos = ctx.config.get('installer-repos') + # create base repo + if base_url.startswith('http'): + repo_to_use = _get_repos_to_use(base_url, repos) + base_repo_file = NamedTemporaryFile(mode='w', delete=False) + _create_temp_repo_file(repo_to_use, base_repo_file) + remote.put_file(base_repo_file.name, base_repo_file.name) + remote.run(args=['sudo', 'cp', base_repo_file.name, + '/etc/yum.repos.d/rh_ceph.repo']) + remote.run(args=['sudo', 'yum', 'clean', 'metadata']) + if installer_url.startswith('http'): + irepo_to_use = _get_repos_to_use( + installer_url, installer_repos) + installer_file = NamedTemporaryFile(delete=False) + _create_temp_repo_file(irepo_to_use, installer_file) + remote.put_file(installer_file.name, installer_file.name) + remote.run(args=['sudo', 'cp', installer_file.name, + '/etc/yum.repos.d/rh_inst.repo']) + remote.run(args=['sudo', 'yum', 'clean', 'metadata']) + if not remote.os.version.startswith('8'): + remote.run(args=['sudo', 'yum', 'update', 'metadata']) + else: + if config.get('deb-repo-url'): + deb_repo = config.get('deb-repo-url') + deb_gpg_key = config.get('deb-gpg-key', None) + set_deb_repo(remote, deb_repo, deb_gpg_key) + + +def _get_repos_to_use(base_url, repos): + repod = dict() + for repo in repos: + repo_to_use = base_url + "compose/" + repo + "/x86_64/os/" + r = requests.get(repo_to_use) + log.info("Checking %s", repo_to_use) + if r.status_code == 200: + log.info("Using %s", repo_to_use) + repod[repo] = repo_to_use + return repod + + +def _create_temp_repo_file(repos, repo_file): + for repo in repos.keys(): + header = "[ceph-" + repo + "]" + "\n" + name = "name=ceph-" + repo + "\n" + baseurl = "baseurl=" + repos[repo] + "\n" + gpgcheck = "gpgcheck=0\n" + enabled = "enabled=1\n\n" + repo_file.write(header) + repo_file.write(name) + repo_file.write(baseurl) + repo_file.write(gpgcheck) + repo_file.write(enabled) + repo_file.close() diff --git a/teuthology/task/internal/syslog.py b/teuthology/task/internal/syslog.py new file mode 100644 index 0000000000..64032a8e7c --- /dev/null +++ b/teuthology/task/internal/syslog.py @@ -0,0 +1,196 @@ +import contextlib +import logging + +from io import BytesIO + +from teuthology import misc +from teuthology.job_status import set_status +from teuthology.orchestra import run + + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def syslog(ctx, config): + """ + start syslog / stop syslog on exit. 
+ """ + if ctx.archive is None: + # disable this whole feature if we're not going to archive the data + # anyway + yield + return + + cluster = ctx.cluster.filter(lambda r: not r.is_container) + if not len(cluster.remotes.keys()): + yield + return + + log.info('Starting syslog monitoring...') + + archive_dir = misc.get_archive_dir(ctx) + log_dir = '{adir}/syslog'.format(adir=archive_dir) + run.wait( + cluster.run( + args=['mkdir', '-p', '-m0755', '--', log_dir], + wait=False, + ) + ) + + CONF = '/etc/rsyslog.d/80-cephtest.conf' + kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir) + misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir) + conf_lines = [ + 'kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log), + '*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format( + misc_log=misc_log), + ] + conf_fp = BytesIO('\n'.join(conf_lines).encode()) + try: + for rem in cluster.remotes.keys(): + log_context = 'system_u:object_r:var_log_t:s0' + for log_path in (kern_log, misc_log): + rem.run(args=['install', '-m', '666', '/dev/null', log_path]) + rem.chcon(log_path, log_context) + misc.sudo_write_file( + remote=rem, + path=CONF, + data=conf_fp, + ) + conf_fp.seek(0) + run.wait( + cluster.run( + args=[ + 'sudo', + 'service', + # a mere reload (SIGHUP) doesn't seem to make + # rsyslog open the files + 'rsyslog', + 'restart', + ], + wait=False, + ), + ) + + yield + finally: + cluster = ctx.cluster.filter(lambda r: not r.is_container) + if not len(cluster.remotes.keys()): + return + + log.info('Shutting down syslog monitoring...') + + run.wait( + cluster.run( + args=[ + 'sudo', + 'rm', + '-f', + '--', + CONF, + run.Raw('&&'), + 'sudo', + 'service', + 'rsyslog', + 'restart', + ], + wait=False, + ), + ) + # race condition: nothing actually says rsyslog had time to + # flush the file fully. oh well. 
+ + log.info('Checking logs for errors...') + for rem in cluster.remotes.keys(): + log.debug('Checking %s', rem.name) + stdout = rem.sh( + [ + 'egrep', '--binary-files=text', + '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b', + run.Raw(f'{archive_dir}/syslog/kern.log'), + run.Raw('|'), + 'grep', '-v', 'task .* blocked for more than .* seconds', + run.Raw('|'), + 'grep', '-v', 'lockdep is turned off', + run.Raw('|'), + 'grep', '-v', 'trying to register non-static key', + run.Raw('|'), + 'grep', '-v', 'DEBUG: fsize', # xfs_fsr + run.Raw('|'), + 'grep', '-v', 'CRON', # ignore cron noise + run.Raw('|'), + 'grep', '-v', 'BUG: bad unlock balance detected', # #6097 + run.Raw('|'), + 'grep', '-v', 'inconsistent lock state', # FIXME see #2523 + run.Raw('|'), + 'grep', '-v', '*** DEADLOCK ***', # part of lockdep output + run.Raw('|'), + 'grep', '-v', + # FIXME see #2590 and #147 + 'INFO: possible irq lock inversion dependency detected', + run.Raw('|'), + 'grep', '-v', + 'INFO: NMI handler (perf_event_nmi_handler) took too long to run', # noqa + run.Raw('|'), + 'grep', '-v', 'INFO: recovery required on readonly', + run.Raw('|'), + 'grep', '-v', 'ceph-create-keys: INFO', + run.Raw('|'), + 'grep', '-v', 'INFO:ceph-create-keys', + run.Raw('|'), + 'grep', '-v', 'Loaded datasource DataSourceOpenStack', + run.Raw('|'), + 'grep', '-v', 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined', # noqa + run.Raw('|'), + 'egrep', '-v', '\\bsalt-master\\b|\\bsalt-minion\\b|\\bsalt-api\\b', + run.Raw('|'), + 'grep', '-v', 'ceph-crash', + run.Raw('|'), + 'egrep', '-v', '\\btcmu-runner\\b.*\\bINFO\\b', + run.Raw('|'), + 'head', '-n', '1', + ], + ) + if stdout != '': + log.error('Error in syslog on %s: %s', rem.name, stdout) + set_status(ctx.summary, 'fail') + if 'failure_reason' not in ctx.summary: + ctx.summary['failure_reason'] = \ + "'{error}' in syslog".format(error=stdout) + + log.info('Compressing syslogs...') + run.wait( + cluster.run( + args=[ + 'find', + '{adir}/syslog'.format(adir=archive_dir), + '-name', + '*.log', + '-print0', + run.Raw('|'), + 'sudo', + 'xargs', + '-0', + '--no-run-if-empty', + '--', + 'gzip', + '--', + ], + wait=False, + ) + ) + + log.info('Gathering journactl -b0...') + run.wait( + cluster.run( + args=[ + 'sudo', 'journalctl', '-b0', + run.Raw('|'), + 'gzip', '-9', + run.Raw('>'), + f'{archive_dir}/syslog/journalctl-b0.gz', + ], + wait=False, + ) + ) diff --git a/teuthology/task/internal/vm_setup.py b/teuthology/task/internal/vm_setup.py new file mode 100644 index 0000000000..f210bc7f41 --- /dev/null +++ b/teuthology/task/internal/vm_setup.py @@ -0,0 +1,51 @@ +import logging +import os +import subprocess + +from teuthology.parallel import parallel +from teuthology.task import ansible +from teuthology.exceptions import CommandFailedError + +log = logging.getLogger(__name__) + + +def vm_setup(ctx, config): + """ + Look for virtual machines and handle their initialization + """ + all_tasks = [list(x.keys())[0] for x in ctx.config['tasks']] + need_ansible = False + if 'kernel' in all_tasks and 'ansible.cephlab' not in all_tasks: + need_ansible = True + ansible_hosts = set() + with parallel(): + editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh') + for rem in ctx.cluster.remotes.keys(): + if rem.is_vm: + ansible_hosts.add(rem.shortname) + try: + rem.sh('test -e /ceph-qa-ready') + except CommandFailedError: + p1 = subprocess.Popen(['cat', editinfo], + stdout=subprocess.PIPE) + p2 = subprocess.Popen( + [ + 'ssh', + '-o', 
'StrictHostKeyChecking=no', + '-t', '-t', + str(rem), + 'sudo', + 'sh' + ], + stdin=p1.stdout, stdout=subprocess.PIPE + ) + _, err = p2.communicate() + if err: + log.error("Edit of /etc/sudoers failed: %s", err) + if need_ansible and ansible_hosts: + log.info("Running ansible on %s", list(ansible_hosts)) + ansible_config = dict( + hosts=list(ansible_hosts), + ) + with ansible.CephLab(ctx, config=ansible_config): + pass diff --git a/teuthology/task/iscsi.py b/teuthology/task/iscsi.py new file mode 100644 index 0000000000..80d01cb8f1 --- /dev/null +++ b/teuthology/task/iscsi.py @@ -0,0 +1,214 @@ +""" +Handle iscsi adm commands for tgt connections. +""" +import logging +import contextlib +import socket + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.task.common_fs_utils import generic_mkfs +from teuthology.task.common_fs_utils import generic_mount +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +def _get_remote(remotes, client): + """ + Get remote object that is associated with the client specified. + """ + for rem in remotes: + if client in remotes[rem]: + return rem + + +def _get_remote_name(remotes, client): + """ + Get remote name that is associated with the client specified. + """ + rem_name = _get_remote(remotes, client).name + rem_name = rem_name[rem_name.find('@') + 1:] + return rem_name + + +def tgt_devname_get(ctx, test_image): + """ + Get the name of the newly created device by following the by-path + link (which is symbolically linked to the appropriate /dev/sd* file). + """ + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + rem_name = _get_remote_name(remotes, test_image) + lnkpath = '/dev/disk/by-path/ip-%s:3260-iscsi-rbd-lun-1' % \ + socket.gethostbyname(rem_name) + return lnkpath + + +def tgt_devname_rtn(ctx, test_image): + """ + Wrapper passed to common_fs_util functions. + """ + image = test_image[test_image.find('.') + 1:] + return tgt_devname_get(ctx, image) + + +def file_io_test(rem, file_from, lnkpath): + """ + dd to the iscsi inteface, read it, and compare with original + """ + rem.run( + args=[ + 'sudo', + 'dd', + 'if=%s' % file_from, + 'of=%s' % lnkpath, + 'bs=1024', + 'conv=fsync', + ]) + tfile2 = rem.sh('mktemp').strip() + rem.run( + args=[ + 'sudo', + 'rbd', + 'export', + 'iscsi-image', + run.Raw('-'), + run.Raw('>'), + tfile2, + ]) + size = rem.sh( + [ + 'ls', + '-l', + file_from, + run.Raw('|'), + 'awk', + '{print $5}', ], + ).strip() + rem.run( + args=[ + 'cmp', + '-n', + size, + file_from, + tfile2, + ]) + rem.run(args=['rm', tfile2]) + + +def general_io_test(ctx, rem, image_name): + """ + Do simple I/O tests to the iscsi interface before putting a + filesystem on it. + """ + rem.run( + args=[ + 'udevadm', + 'settle', + ]) + test_phrase = 'The time has come the walrus said to speak of many things.' + lnkpath = tgt_devname_get(ctx, image_name) + tfile1 = rem.sh('mktemp').strip() + rem.run( + args=[ + 'echo', + test_phrase, + run.Raw('>'), + tfile1, + ]) + file_io_test(rem, tfile1, lnkpath) + rem.run(args=['rm', tfile1]) + file_io_test(rem, '/bin/ls', lnkpath) + + +@contextlib.contextmanager +def start_iscsi_initiators(ctx, tgt_link): + """ + This is the sub-task that assigns an rbd to an iscsiadm control and + performs a login (thereby creating a /dev/sd device). It performs + a logout when finished. 
+ """ + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + tgtd_list = [] + for role, host in tgt_link: + rem = _get_remote(remotes, role) + rem_name = _get_remote_name(remotes, host) + rem.run( + args=[ + 'sudo', + 'iscsiadm', + '-m', + 'discovery', + '-t', + 'st', + '-p', + rem_name, + ]) + proc = rem.run( + args=[ + 'sudo', + 'iscsiadm', + '-m', + 'node', + '--login', + ]) + if proc.exitstatus == 0: + tgtd_list.append((rem, rem_name)) + general_io_test(ctx, rem, host) + try: + with contextutil.nested( + lambda: generic_mkfs(ctx=ctx, config={host: {'fs_type': 'xfs'}}, + devname_rtn=tgt_devname_rtn), + lambda: generic_mount(ctx=ctx, config={host: None}, + devname_rtn=tgt_devname_rtn), + ): + yield + finally: + for rem_info in tgtd_list: + rem = rem_info[0] + rem_name = rem_info[1] + rem.run( + args=[ + 'sudo', + 'iscsiadm', + '-m', + 'node', + '--logout', + ]) + +@contextlib.contextmanager +def task(ctx, config): + """ + handle iscsi admin login after a tgt connection has been established. + + Assume a default host client of client.0 and a sending client of + client.0 if not specified otherwise. + + Sample tests could be: + + iscsi: + + This sets up a tgt link from client.0 to client.0 + + iscsi: [client.1, client.2] + + This sets up a tgt link from client.1 to client.0 and a tgt link + from client.2 to client.0 + + iscsi: + client.0: client.1 + client.1: client.0 + + This sets up a tgt link from client.0 to client.1 and a tgt link + from client.1 to client.0 + + Note that the iscsi image name is iscsi-image, so this only works + for one image being tested at any one time. + """ + try: + pairs = config.items() + except AttributeError: + pairs = [('client.0', 'client.0')] + with contextutil.nested( + lambda: start_iscsi_initiators(ctx=ctx, tgt_link=pairs),): + yield diff --git a/teuthology/task/kernel.py b/teuthology/task/kernel.py new file mode 100644 index 0000000000..23c164cd53 --- /dev/null +++ b/teuthology/task/kernel.py @@ -0,0 +1,1341 @@ +""" +Kernel installation task +""" + +import logging +import os +import re +import shlex +from io import StringIO + +from teuthology.util.compat import urljoin + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.exceptions import ( + UnsupportedPackageTypeError, + ConfigError, + VersionNotFoundError, +) +from teuthology.packaging import ( + install_package, + get_koji_build_info, + get_kojiroot_base_url, + get_koji_package_name, + get_koji_task_rpm_info, + get_koji_task_result, + get_builder_project, +) +from teuthology.task.install.deb import install_dep_packages + +log = logging.getLogger(__name__) + +CONFIG_DEFAULT = {'branch': 'main'} +TIMEOUT_DEFAULT = 300 + +VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task'] + + +def normalize_config(ctx, config): + """ + Returns a config whose keys are all real roles. + Generic roles (client, mon, osd, etc.) are replaced with + the actual roles (client.0, client.1, etc.). If the config + specifies a different version for a specific role, this is + unchanged. 
+ + For example, with 4 OSDs this:: + + osd: + tag: v3.0 + kdb: true + osd.1: + branch: new_btrfs + kdb: false + osd.3: + deb: /path/to/linux-whatever.deb + + is transformed into:: + + osd.0: + tag: v3.0 + kdb: true + osd.1: + branch: new_btrfs + kdb: false + osd.2: + tag: v3.0 + kdb: true + osd.3: + deb: /path/to/linux-whatever.deb + + If config is None or just specifies a version to use, + it is applied to all nodes. + + :param ctx: Context + :param config: Configuration + """ + log.info(f'normalize config orig: {config}') + if not config or \ + len([x for x in config.keys() if x in + VERSION_KEYS + ['kdb', 'flavor', 'hwe']]) == len(config.keys()): + new_config = {} + if not config: + config = CONFIG_DEFAULT + for role in teuthology.all_roles(ctx.cluster): + new_config[role] = config.copy() + return new_config + + new_config = {} + for role, role_config in config.items(): + if role_config is None: + role_config = CONFIG_DEFAULT + if '.' in role: + new_config[role] = role_config.copy() + else: + for id_ in teuthology.all_roles_of_type(ctx.cluster, role): + name = '{type}.{id}'.format(type=role, id=id_) + # specific overrides generic + if name not in config: + new_config[name] = role_config.copy() + log.info(f'normalize config final: {new_config}') + return new_config + + +def normalize_and_apply_overrides(ctx, config, overrides): + """ + kernel task config is hierarchical and needs to be transformed into + a normal form, see normalize_config() for details. Applying overrides is + also more involved compared to other tasks because of the number of ways + a version of the kernel to install can be specified. + + Returns a (normalized config, timeout) tuple. + + :param ctx: Context + :param config: Configuration + """ + timeout = TIMEOUT_DEFAULT + if 'timeout' in config: + timeout = config.pop('timeout') + config = normalize_config(ctx, config) + + if 'timeout' in overrides: + timeout = overrides.pop('timeout') + if overrides: + overrides = normalize_config(ctx, overrides) + log.debug('normalized overrides %s' % overrides) + + # Handle a case when a version specified with one type of version key + # is overridden by a version specified with another type of version key + # (e.g. 'branch: foo' is overridden with 'tag: bar'). To be able to + # use deep_merge(), drop all version keys from the original config if + # the corresponding override has a version key. + for role, role_config in config.items(): + if (role in overrides and + any(k in overrides[role] for k in VERSION_KEYS)): + for k in VERSION_KEYS: + role_config.pop(k, None) + teuthology.deep_merge(config, overrides) + + return (config, timeout) + + +def validate_config(ctx, config): + """ + Make sure that all kernels in the list of remove kernels + refer to the same kernel. + + :param ctx: Context + :param config: Configuration + """ + for _, roles_for_host in ctx.cluster.remotes.items(): + kernel = None + for role in roles_for_host: + role_kernel = config.get(role, kernel) + if kernel is None: + kernel = role_kernel + elif role_kernel is not None: + assert kernel == role_kernel, \ + "everything on the same host must use the same kernel" + if role in config: + del config[role] + + +def need_to_install(ctx, role, version): + """ + Check to see if we need to install a kernel. Get the version of the + currently running kernel, and compare it against the value passed in. + + :param ctx: Context + :param role: Role + :param version: value to compare against (used in checking), can be either + a utsrelease string (e.g. 
'3.13.0-rc3-ceph-00049-ge2817b3') + or a sha1. + """ + ret = True + log.info('Checking kernel version of {role}, want "{ver}"...'.format( + role=role, ver=version)) + uname_fp = StringIO() + ctx.cluster.only(role).run( + args=[ + 'uname', + '-r', + ], + stdout=uname_fp, + ) + cur_version = uname_fp.getvalue().rstrip('\n') + log.debug('current kernel version is {ver} vs {want}'.format(ver=cur_version, + want=version)) + + if '.' in str(version): + if cur_version == version: + log.debug('utsrelease strings match, do not need to install') + ret = False + os_type = teuthology.get_distro(ctx) + log.debug("Distro of this test job: {}".format(os_type)) + if os_type in ['sle', 'opensuse']: + cur_version_match = re.search('(.*)-default$', cur_version) + if cur_version_match: + cur_version_rp = cur_version_match.group(1) + if cur_version_rp in version: + log.debug('"{}" is a substring of "{}" - the latest {} kernel is running' + .format(cur_version_rp, version, os_type)) + ret = False + else: + log.debug('failed to parse current kernel version {} (os_type is "{}")' + .format(cur_version, os_type)) + else: + # version is sha1, need to try to extract sha1 from cur_version + match = re.search('[-_]g([0-9a-f]{6,40})', cur_version) + if match: + cur_sha1 = match.group(1) + log.debug('extracting sha1, {ver} -> {sha1}'.format( + ver=cur_version, sha1=cur_sha1)) + m = min(len(cur_sha1), len(version)) + assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m + if cur_sha1[0:m] == version[0:m]: + log.debug('extracted sha1 matches, do not need to install') + ret = False + else: + log.debug('failed to parse current kernel version') + uname_fp.close() + return ret + + +def install_firmware(ctx, config): + """ + Go to the github to get the latest firmware. + + :param ctx: Context + :param config: Configuration + """ + linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git' + uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream + fw_dir = '/lib/firmware/updates' + + for role in config.keys(): + if isinstance(config[role], str) and config[role].find('distro') >= 0: + log.info('Skipping firmware on distro kernel'); + return + (role_remote,) = ctx.cluster.only(role).remotes.keys() + package_type = role_remote.os.package_type + if package_type == 'rpm': + role_remote.run(args=[ + 'sudo', 'yum', 'upgrade', '-y', 'linux-firmware', + ]) + continue + log.info('Installing linux-firmware on {role}...'.format(role=role)) + role_remote.run( + args=[ + # kludge around mysterious 0-byte .git/HEAD files + 'cd', fw_dir, + run.Raw('&&'), + 'test', '-d', '.git', + run.Raw('&&'), + 'test', '!', '-s', '.git/HEAD', + run.Raw('&&'), + 'sudo', 'rm', '-rf', '.git', + run.Raw(';'), + # init + 'sudo', 'install', '-d', '-m0755', fw_dir, + run.Raw('&&'), + 'cd', fw_dir, + run.Raw('&&'), + 'sudo', 'git', 'init', + ], + ) + role_remote.run( + args=[ + 'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config', + '--get', 'remote.origin.url', run.Raw('>/dev/null'), + run.Raw('||'), + 'sudo', 'git', '--git-dir=%s/.git' % fw_dir, + 'remote', 'add', 'origin', uri, + ], + ) + # In case the remote already existed, set its url + role_remote.run( + args=[ + 'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'remote', + 'set-url', 'origin', uri, run.Raw('>/dev/null') + ] + ) + role_remote.run( + args=[ + 'cd', fw_dir, + run.Raw('&&'), + 'sudo', 'git', 'fetch', 'origin', + run.Raw('&&'), + 'sudo', 'git', 'reset', '--hard', 'origin/main' + ], + ) + + +def gitbuilder_pkg_name(remote): + 
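+    """
+    Return the generic kernel package filename served by gitbuilder for
+    this remote's package type: kernel.x86_64.rpm on rpm systems,
+    linux-image.deb on deb systems; raises UnsupportedPackageTypeError
+    for anything else.
+    """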
if remote.os.package_type == 'rpm': + pkg_name = 'kernel.x86_64.rpm' + elif remote.os.package_type == 'deb': + pkg_name = 'linux-image.deb' + else: + raise UnsupportedPackageTypeError(remote) + return pkg_name + + +def remote_pkg_path(remote): + """ + This is where kernel packages are copied over (in case of local + packages) or downloaded to (in case of gitbuilder packages) and + then installed from. + """ + return os.path.join('/tmp', gitbuilder_pkg_name(remote)) + + +def download_kernel(ctx, config): + """ + Supply each remote with a kernel package: + - local kernels are copied over + - gitbuilder kernels are downloaded + - nothing is done for distro kernels + + :param ctx: Context + :param config: Configuration + """ + procs = {} + for role, src in config.items(): + needs_download = False + + if src == 'distro': + # don't need to download distro kernels + log.debug("src is distro, skipping download"); + continue + + (role_remote,) = ctx.cluster.only(role).remotes.keys() + if isinstance(src, dict): + # we're downloading a kernel from koji, the src dict here + # is the build_info retrieved from koji using get_koji_build_info + if src.get("id"): + build_id = src["id"] + log.info("Downloading kernel with build_id {build_id} on {role}...".format( + build_id=build_id, + role=role + )) + needs_download = True + baseurl = get_kojiroot_base_url(src) + pkg_name = get_koji_package_name("kernel", src) + elif src.get("task_id"): + needs_download = True + log.info("Downloading kernel with task_id {task_id} on {role}...".format( + task_id=src["task_id"], + role=role + )) + baseurl = src["base_url"] + # this var is also poorly named as it's not the package name, + # but the full name of the rpm file to download. + pkg_name = src["rpm_name"] + elif src.find('/') >= 0: + # local package - src is path + log.info('Copying kernel package {path} to {role}...'.format( + path=src, role=role)) + role_remote.put_file(src,remote_pkg_path(role_remote)) + else: + # gitbuilder package - src is sha1 + log.info('Downloading kernel {sha1} on {role}...'.format( + sha1=src, + role=role, + )) + needs_download = True + + builder = get_builder_project()( + 'kernel', + {'sha1': src}, + ctx=ctx, + remote=role_remote, + ) + if teuth_config.use_shaman: + if role_remote.os.package_type == 'rpm': + arch = builder.arch + baseurl = urljoin( + builder.base_url, + '/'.join([arch, '']) + ) + pkg_name = "kernel-%s.%s.rpm" % ( + builder.version, + arch, + ) + elif role_remote.os.package_type == 'deb': + arch = 'amd64' # FIXME + baseurl = urljoin( + builder.base_url, + '/'.join([ + 'pool', 'main', 'l', + 'linux-%s' % builder.scm_version, '' + ]) + ) + pkg_name = 'linux-image-%s_%s_%s.deb' % ( + builder.scm_version, + builder.version, + arch, + ) + else: + baseurl = builder.base_url + "/" + pkg_name = gitbuilder_pkg_name(role_remote) + + log.info("fetching, builder baseurl is %s", baseurl) + + if needs_download: + proc = role_remote.run( + args=[ + 'rm', '-f', remote_pkg_path(role_remote), + run.Raw('&&'), + 'echo', + pkg_name, + run.Raw('|'), + 'wget', + '-nv', + '-O', + remote_pkg_path(role_remote), + '--base={url}'.format(url=baseurl), + '--input-file=-', + ], + wait=False) + procs[role_remote.name] = proc + + for name, proc in procs.items(): + log.debug('Waiting for download/copy to %s to complete...', name) + proc.wait() + + +def _no_grub_link(in_file, remote, kernel_ver): + """ + Copy and link kernel related files if grub cannot be used + (as is the case in Arm kernels) + + :param infile: kernel file or image file to be copied. 
+
+    :param remote: remote machine
+    :param kernel_ver: kernel version
+    """
+    boot1 = '/boot/%s' % in_file
+    boot2 = '%s.old' % boot1
+    remote.run(
+        args=[
+            'if', 'test', '-e', boot1, run.Raw(';'), 'then',
+            'sudo', 'mv', boot1, boot2, run.Raw(';'), 'fi',],
+    )
+    remote.run(
+        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver) , boot1, ],
+    )
+
+
+def install_latest_rh_kernel(ctx, config):
+    """
+    Installs the latest z stream kernel
+    Reboot for the new kernel to take effect
+    """
+    if config is None:
+        config = {}
+    if config.get('skip'):
+        return
+    with parallel() as p:
+        for remote in ctx.cluster.remotes.keys():
+            p.spawn(update_rh_kernel, remote)
+
+
+def update_rh_kernel(remote):
+    package_type = remote.os.package_type
+    remote.run(args=['uname', '-a'])
+    import time
+    if package_type == 'rpm':
+        update_log = remote.sh('sudo yum update -y kernel')
+        log.info(update_log)
+        if not update_log.find("Installed") == -1:
+            log.info("Kernel updated to latest z stream on %s", remote.shortname)
+            log.info("Rebooting %s", remote.shortname)
+            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
+            time.sleep(40)
+            log.info("Reconnecting after reboot")
+            remote.reconnect(timeout=300)
+            remote.run(args=['uname', '-a'])
+        elif not update_log.find('No packages marked for update') == -1:
+            log.info("Latest version already installed on %s", remote.shortname)
+
+
+def install_and_reboot(ctx, need_install, config):
+    """
+    Install and reboot the kernel. This mostly performs remote
+    installation operations. The code does check for Arm images
+    and skips grub operations if the kernel is Arm. Otherwise, it
+    extracts kernel titles from submenu entries and makes the appropriate
+    grub calls. The assumptions here are somewhat simplified in that
+    it expects kernel entries to be present under submenu entries.
+
+    :param ctx: Context
+    :param need_install: map from caller
+    :param config: Configuration
+    """
+    procs = {}
+    kernel_title = ''
+    for role, src in need_install.items():
+        (role_remote,) = ctx.cluster.only(role).remotes.keys()
+        if isinstance(src, str) and src.find('distro') >= 0:
+            log.info('Installing distro kernel on {role}...'.format(role=role))
+            install_kernel(role_remote, config[role], version=src)
+            continue
+
+        log.info('Installing kernel {src} on {role}...'.format(src=src,
+                                                               role=role))
+        package_type = role_remote.os.package_type
+        if package_type == 'rpm':
+            proc = role_remote.run(
+                args=[
+                    'sudo',
+                    'rpm',
+                    '-ivh',
+                    '--oldpackage',
+                    '--replacefiles',
+                    '--replacepkgs',
+                    remote_pkg_path(role_remote),
+                ])
+            install_kernel(role_remote, config[role], path=remote_pkg_path(role_remote))
+            continue
+
+        # TODO: Refactor this into install_kernel() so that it handles all
+        # cases for both rpm and deb packages.
+        proc = role_remote.run(
+            args=[
+                # install the kernel deb
+                'sudo',
+                'dpkg',
+                '-i',
+                remote_pkg_path(role_remote),
+            ],
+        )
+
+        # collect kernel image name from the .deb
+        kernel_title = get_image_version(role_remote,
+                                         remote_pkg_path(role_remote))
+        log.info('searching for kernel {}'.format(kernel_title))
+
+        if kernel_title.endswith("-highbank"):
+            _no_grub_link('vmlinuz', role_remote, kernel_title)
+            _no_grub_link('initrd.img', role_remote, kernel_title)
+            proc = role_remote.run(
+                args=[
+                    'sudo',
+                    'shutdown',
+                    '-r',
+                    'now',
+                ],
+                wait=False,
+            )
+            procs[role_remote.name] = proc
+            continue
+
+        # look for menuentry for our kernel, and collect any
+        # submenu entries for their titles.
Assume that if our + # kernel entry appears later in the file than a submenu entry, + # it's actually nested under that submenu. If it gets more + # complex this will totally break. + + kernel_entries = role_remote.sh([ + 'egrep', + '(submenu|menuentry.*' + kernel_title + ').*{', + '/boot/grub/grub.cfg' + ]).split('\n') + submenu_title = '' + default_title = '' + for l in kernel_entries: + fields = shlex.split(l) + if len(fields) >= 2: + command, title = fields[:2] + if command == 'submenu': + submenu_title = title + '>' + if command == 'menuentry': + if title.endswith(kernel_title): + default_title = title + break + log.info('submenu_title:{}'.format(submenu_title)) + log.info('default_title:{}'.format(default_title)) + + proc = role_remote.run( + args=[ + # use the title(s) to construct the content of + # the grub menu entry, so we can default to it. + '/bin/echo', + '-e', + r'cat </dev/null'), + run.Raw('&&'), + 'sudo', + 'chmod', + 'a+x', + '--', + '/etc/grub.d/01_ceph_kernel.tmp~', + run.Raw('&&'), + 'sudo', + 'mv', + '--', + '/etc/grub.d/01_ceph_kernel.tmp~', + '/etc/grub.d/01_ceph_kernel', + # update grub again so it accepts our default + run.Raw('&&'), + 'sudo', + 'update-grub', + run.Raw('&&'), + 'rm', + remote_pkg_path(role_remote), + run.Raw('&&'), + # work around a systemd issue, where network gets shut down + # before ssh can close its session + run.Raw('('), + 'sleep', + '1', + run.Raw('&&'), + 'sudo', + 'shutdown', + '-r', + 'now', + run.Raw('&'), + run.Raw(')'), + ], + wait=False, + ) + procs[role_remote.name] = proc + + for name, proc in procs.items(): + log.debug('Waiting for install on %s to complete...', name) + proc.wait() + + +def enable_disable_kdb(ctx, config): + """ + Enable kdb on remote machines in use. Disable on those that are + not in use. + + :param ctx: Context + :param config: Configuration + """ + for role, enable in config.items(): + (role_remote,) = ctx.cluster.only(role).remotes.keys() + if "mira" in role_remote.name: + serialdev = "ttyS2" + else: + serialdev = "ttyS1" + if enable: + log.info('Enabling kdb on {role}...'.format(role=role)) + try: + role_remote.run( + args=[ + 'echo', serialdev, + run.Raw('|'), + 'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc' + ]) + except run.CommandFailedError: + log.warning('Kernel does not support kdb') + else: + log.info('Disabling kdb on {role}...'.format(role=role)) + # Add true pipe so command doesn't fail on kernel without kdb support. + try: + role_remote.run( + args=[ + 'echo', '', + run.Raw('|'), + 'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc', + run.Raw('|'), + 'true', + ]) + except run.CommandFailedError: + log.warning('Kernel does not support kdb') + + +def wait_for_reboot(ctx, need_install, timeout, config, distro=False): + """ + Loop reconnecting and checking kernel versions until + they're all correct or the timeout is exceeded. + + :param ctx: Context + :param need_install: list of packages that we need to reinstall. + :param timeout: number of second before we timeout. + """ + import time + # do not try to reconnect immediately after triggering the reboot, + # because the reboot sequence might not have started yet (!) 
-- + # see https://tracker.ceph.com/issues/44187 + time.sleep(30) + starttime = time.time() + while need_install: + for client in list(need_install.keys()): + if 'distro' in str(need_install[client]): + distro = True + log.info('Checking client {client} for new kernel version...'.format(client=client)) + try: + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.reconnect(timeout=timeout) + if distro: + assert not need_to_install_distro(remote, config[client]), \ + 'failed to install new distro kernel version within timeout' + + else: + assert not need_to_install(ctx, client, need_install[client]), \ + 'failed to install new kernel version within timeout' + del need_install[client] + except Exception: + log.exception("Saw exception") + # ignore connection resets and asserts while time is left + if time.time() - starttime > timeout: + raise + time.sleep(1) + + +def get_version_of_running_kernel(remote): + """ + Get the current running kernel version in a format that can be compared + with the output of "rpm -q kernel..." + """ + dist_release = remote.os.name + uname_r = remote.sh("uname -r").strip() + current = None + if dist_release in ['opensuse', 'sle']: + # "uname -r" returns 4.12.14-lp151.28.36-default + # "rpm -q kernel-default" returns 4.12.14-lp151.28.36.1.x86_64 + # In order to be able to meaningfully check whether the former + # is "in" the latter, we have to chop off the "-default". + current = re.sub(r"-default$", "", uname_r) + else: + current = uname_r + return current + + +def need_to_install_distro(remote, role_config): + """ + Installing kernels on rpm won't setup grub/boot into them. This installs + the newest kernel package and checks its version and compares against + the running kernel (uname -r). Similar check for deb. + + :returns: False if running the newest distro kernel. Returns the version of + the newest if it is not running. + """ + dist_release = remote.os.name + package_type = remote.os.package_type + current = get_version_of_running_kernel(remote) + log.info("Running kernel on {node}: {version}".format( + node=remote.shortname, version=current)) + installed_version = None + if package_type == 'rpm': + if dist_release in ['opensuse', 'sle']: + install_stdout = remote.sh( + 'sudo zypper --non-interactive install kernel-default' + ) + else: + install_stdout = remote.sh( + 'sudo yum install -y kernel' + ) + match = re.search( + "Package (.*) already installed", + install_stdout, flags=re.MULTILINE) + if 'Nothing to do' in install_stdout: + installed_version = match.groups()[0] if match else '' + err_mess = StringIO() + err_mess.truncate(0) + remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum', + 'reinstall', 'kernel', run.Raw('||'), 'true'], + stderr=err_mess) + reinstall_stderr = err_mess.getvalue() + err_mess.close() + if 'Skipping the running kernel' in reinstall_stderr: + running_version = re.search( + "Skipping the running kernel: (.*)", + reinstall_stderr, flags=re.MULTILINE).groups()[0] + if installed_version == running_version: + log.info( + 'Newest distro kernel already installed and running') + return False + else: + remote.run(args=['sudo', 'yum', 'reinstall', '-y', 'kernel', + run.Raw('||'), 'true']) + newest = get_latest_image_version_rpm(remote) + + if package_type == 'deb': + newest = get_latest_image_version_deb(remote, dist_release, role_config) + + if current in newest or current.replace('-', '_') in newest: + log.info('Newest distro kernel installed and running') + return False + log.info( + 'Not newest distro kernel. 
Current: {cur} Expected: {new}'.format( + cur=current, new=newest)) + return newest + + +def maybe_generate_initrd_rpm(remote, path, version): + """ + Generate initrd with mkinitrd if the hooks that should make it + happen on its own aren't there. + + :param path: rpm package path + :param version: kernel version to generate initrd for + e.g. 3.18.0-rc6-ceph-00562-g79a9fa5 + """ + out = remote.sh(['rpm', '--scripts', '-qp', path]) + if 'bin/installkernel' in out or 'bin/kernel-install' in out: + return + + log.info("No installkernel or kernel-install hook in %s, " + "will generate initrd for %s", path, version) + remote.run( + args=[ + 'sudo', + 'mkinitrd', + '--allow-missing', + '-f', # overwrite existing initrd + '/boot/initramfs-' + version + '.img', + version, + ]) + + +def install_kernel(remote, role_config, path=None, version=None): + """ + A bit of misnomer perhaps - the actual kernel package is installed + elsewhere, this function deals with initrd and grub. Currently the + following cases are handled: + - local, gitbuilder, distro for rpm packages + - distro for deb packages - see TODO in install_and_reboot() + + TODO: reboots should be issued from install_and_reboot() + + :param path: package path (for local and gitbuilder cases) + :param version: for RPM distro kernels, pass this to update_grub_rpm + """ + dist_release = remote.os.name + templ = "install_kernel(remote={remote}, path={path}, version={version})" + log.debug(templ.format(remote=remote, path=path, version=version)) + package_type = remote.os.package_type + if package_type == 'rpm': + if dist_release in ['opensuse', 'sle']: + # FIXME + pass + else: + if path: + version = get_image_version(remote, path) + # This is either a gitbuilder or a local package and both of these + # could have been built with upstream rpm targets with specs that + # don't have a %post section at all, which means no initrd. + maybe_generate_initrd_rpm(remote, path, version) + elif not version or version == 'distro': + version = get_latest_image_version_rpm(remote) + update_grub_rpm(remote, version) + remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False ) + return + + if package_type == 'deb': + newversion = get_latest_image_version_deb(remote, dist_release, role_config) + if 'ubuntu' in dist_release: + grub2conf = teuthology.get_file(remote, + '/boot/grub/grub.cfg', sudo=True).decode() + submenu = '' + menuentry = '' + for line in grub2conf.split('\n'): + if 'submenu' in line: + submenu = line.split('submenu ')[1] + # Ubuntu likes to be sneaky and change formatting of + # grub.cfg between quotes/doublequotes between versions + if submenu.startswith("'"): + submenu = submenu.split("'")[1] + if submenu.startswith('"'): + submenu = submenu.split('"')[1] + if 'menuentry' in line: + if newversion in line and 'recovery' not in line: + menuentry = line.split('\'')[1] + break + if submenu: + grubvalue = submenu + '>' + menuentry + else: + grubvalue = menuentry + grubfile = 'cat < %s -> %s", path, basename, sha1) + return sha1 + + +def task(ctx, config): + """ + Make sure the specified kernel is installed. + This can be a branch, tag, or sha1 of ceph-client.git or a local + kernel package. 
+ + To install ceph-client.git branch (default: main):: + + kernel: + branch: testing + + To install ceph-client.git tag:: + + kernel: + tag: v3.18 + + To install ceph-client.git sha1:: + + kernel: + sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50 + + To install from a koji build_id:: + + kernel: + koji: 416058 + + To install from a koji task_id:: + + kernel: + koji_task: 9678206 + + When installing from koji you also need to set the urls for koji hub + and the koji root in your teuthology.yaml config file. These are shown + below with their default values:: + + kojihub_url: http://koji.fedoraproject.org/kojihub + kojiroot_url: http://kojipkgs.fedoraproject.org/packages + + When installing from a koji task_id you also need to set koji_task_url, + which is the base url used to download rpms from koji task results:: + + koji_task_url: https://kojipkgs.fedoraproject.org/work/ + + To install local rpm (target should be an rpm system):: + + kernel: + rpm: /path/to/appropriately-named.rpm + + To install local deb (target should be a deb system):: + + kernel: + deb: /path/to/appropriately-named.deb + + For rpm: or deb: to work it should be able to figure out sha1 from + local kernel package basename, see get_sha1_from_pkg_name(). This + means that you can't for example install a local tag - package built + with upstream {rpm,deb}-pkg targets won't have a sha1 in its name. + + If you want to schedule a run and use a local kernel package, you + have to copy the package over to a box teuthology workers are + running on and specify a path to the package on that box. + + All of the above will install a specified kernel on all targets. + You can specify different kernels for each role or for all roles of + a certain type (more specific roles override less specific, see + normalize_config() for details):: + + kernel: + client: + tag: v3.0 + osd: + branch: btrfs_fixes + client.1: + branch: more_specific + osd.3: + branch: main + + To wait 3 minutes for hosts to reboot (default: 300):: + + kernel: + timeout: 180 + + To enable kdb:: + + kernel: + kdb: true + + :param ctx: Context + :param config: Configuration + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + "task kernel only supports a dictionary for configuration" + + overrides = ctx.config.get('overrides', {}).get('kernel', {}) + config, timeout = normalize_and_apply_overrides(ctx, config, overrides) + validate_config(ctx, config) + log.info('config %s, timeout %d' % (config, timeout)) + + with parallel() as p: + for role, role_config in config.items(): + p.spawn(process_role, ctx, config, timeout, role, role_config) + + +def process_role(ctx, config, timeout, role, role_config): + need_install = None # sha1 to dl, or path to rpm or deb + need_version = None # utsrelease or sha1 + + # gather information about this remote + (role_remote,) = ctx.cluster.only(role).remotes.keys() + system_type = role_remote.os.name + if role_remote.is_container: + log.info(f"Remote f{role_remote.shortname} is a container; skipping kernel installation") + return + if role_config.get('rpm') or role_config.get('deb'): + # We only care about path - deb: vs rpm: is meaningless, + # rpm: just happens to be parsed first. Nothing is stopping + # 'deb: /path/to/foo.rpm' and it will work provided remote's + # os.package_type is 'rpm' and vice versa. 
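+        # For example (hypothetical path), on an rpm-based remote both
+        #     kernel:
+        #       rpm: /path/to/kernel-4.4.0-ceph-g1234abc.x86_64.rpm
+        # and
+        #     kernel:
+        #       deb: /path/to/kernel-4.4.0-ceph-g1234abc.x86_64.rpm
+        # install the same package, since only the path (and the sha1
+        # embedded in its basename) is used.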
+ path = role_config.get('rpm') + if not path: + path = role_config.get('deb') + sha1 = get_sha1_from_pkg_name(path) + assert sha1, "failed to extract commit hash from path %s" % path + if need_to_install(ctx, role, sha1): + need_install = path + need_version = sha1 + elif role_config.get('sha1') == 'distro': + version = need_to_install_distro(role_remote, role_config) + if version: + need_install = 'distro' + need_version = version + elif role_config.get("koji") or role_config.get('koji_task'): + # installing a kernel from koji + build_id = role_config.get("koji") + task_id = role_config.get("koji_task") + if role_remote.os.package_type != "rpm": + msg = ( + "Installing a kernel from koji is only supported " + "on rpm based systems. System type is {system_type}." + ) + msg = msg.format(system_type=system_type) + log.error(msg) + ctx.summary["failure_reason"] = msg + ctx.summary["status"] = "dead" + raise ConfigError(msg) + + # FIXME: this install should probably happen somewhere else + # but I'm not sure where, so we'll leave it here for now. + install_package('koji', role_remote) + + if build_id: + # get information about this build from koji + build_info = get_koji_build_info(build_id, role_remote, ctx) + version = "{ver}-{rel}.x86_64".format( + ver=build_info["version"], + rel=build_info["release"] + ) + elif task_id: + # get information about results of this task from koji + task_result = get_koji_task_result(task_id, role_remote, ctx) + # this is not really 'build_info', it's a dict of information + # about the kernel rpm from the task results, but for the sake + # of reusing the code below I'll still call it that. + build_info = get_koji_task_rpm_info( + 'kernel', + task_result['rpms'] + ) + # add task_id so we can know later that we're installing + # from a task and not a build. + build_info["task_id"] = task_id + version = build_info["version"] + + if need_to_install(ctx, role, version): + need_install = build_info + need_version = version + else: + builder = get_builder_project()( + "kernel", + role_config, + ctx=ctx, + remote=role_remote, + ) + sha1 = builder.sha1 + log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1)) + ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1 + + if need_to_install(ctx, role, sha1): + if teuth_config.use_shaman: + version = builder.scm_version + else: + version = builder.version + if not version: + raise VersionNotFoundError(builder.base_url) + need_install = sha1 + need_version = version + + if need_install: + install_firmware(ctx, {role: need_install}) + download_kernel(ctx, {role: need_install}) + install_and_reboot(ctx, {role: need_install}, config) + wait_for_reboot(ctx, {role: need_version}, timeout, config) + + # enable or disable kdb if specified, otherwise do not touch + if role_config.get('kdb') is not None: + kdb = role_config.get('kdb') + enable_disable_kdb(ctx, {role: kdb}) + diff --git a/teuthology/task/knfsd.py b/teuthology/task/knfsd.py new file mode 100644 index 0000000000..100671d822 --- /dev/null +++ b/teuthology/task/knfsd.py @@ -0,0 +1,169 @@ +""" +Export/Unexport a ``nfs server`` client. +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def get_nfsd_args(remote, cmd): + args=[ + 'sudo', + 'service', + 'nfs', + cmd, + ] + if remote.os.package_type == 'deb': + args[2] = 'nfs-kernel-server' + return args + +@contextlib.contextmanager +def task(ctx, config): + """ + Export/Unexport a ``nfs server`` client. 
+
+    The config is optional and defaults to exporting on all clients. If
+    a config is given, it is expected to be a list or dict of clients to do
+    this operation on. You must have specified ``ceph-fuse`` or
+    ``kclient`` on all clients specified for knfsd.
+
+    Example that exports all clients::
+
+        tasks:
+        - ceph:
+        - kclient:
+        - knfsd:
+        - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+        tasks:
+        - ceph:
+        - ceph-fuse: [client.0]
+        - kclient: [client.1]
+        - knfsd: [client.0, client.1]
+        - interactive:
+
+    Example that specifies export options::
+
+        tasks:
+        - ceph:
+        - kclient: [client.0, client.1]
+        - knfsd:
+            client.0:
+              options: [rw,root_squash]
+            client.1:
+        - interactive:
+
+    Note that when options aren't specified, rw,no_root_squash is the default.
+    When you specify options, the defaults are as specified by exports(5).
+
+    So if empty options are specified, i.e. options: [] these are the defaults:
+        ro,sync,wdelay,hide,nocrossmnt,secure,root_squash,no_all_squash,
+        no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    log.info('Exporting nfs server...')
+
+    if config is None:
+        config = dict(('client.{id}'.format(id=id_), None)
+                      for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+    elif isinstance(config, list):
+        config = dict((name, None) for name in config)
+
+    clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+    for id_, remote in clients:
+        mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+        client_config = config.get("client.%s" % id_)
+        if client_config is None:
+            client_config = {}
+        log.debug("Client client.%s config is %s" % (id_, client_config))
+
+        if client_config.get('options') is not None:
+            opts = ','.join(client_config.get('options'))
+        else:
+            opts = 'rw,no_root_squash'
+
+        # Undocumented option to export to any client in case
+        # testing in interactive mode from other unspecified clients.
+        wildcard = False
+        if client_config.get('wildcard') is not None:
+            wildcard = True
+
+        log.info('Exporting knfsd client.{id} at {remote} *:{mnt} ({opt})...'.format(
+            id=id_, remote=remote, mnt=mnt, opt=opts))
+
+        """
+        Should the user want to run with root_squash enabled, there is no
+        way to write anything to the initial ceph root dir which is set to
+        rwxr-xr-x root root.
+
+        This could possibly break test cases that make assumptions about
+        the initial state of the root dir.
+        """
+        remote.run(
+            args=[
+                'sudo',
+                'chmod',
+                "777",
+                '{MNT}'.format(MNT=mnt),
+            ],
+        )
+        """
+        Start NFS kernel server
+        """
+        remote.run( args=get_nfsd_args(remote, 'restart') )
+        args=[
+            'sudo',
+            "exportfs",
+            '-o',
+            'fsid=123{id},{opt}'.format(id=id_,opt=opts),
+        ]
+
+        if wildcard:
+            args += ['*:{MNT}'.format(MNT=mnt)]
+        else:
+            """
+            DEFAULT
+            Prevent bogus clients from old runs from accessing our
+            export. Specify all node addresses for this run.
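+
+            For illustration only (hypothetical addresses, and assuming the
+            usual /home/ubuntu/cephtest testdir), the command built below
+            ends up looking roughly like:
+                sudo exportfs -o fsid=1230,rw,no_root_squash \
+                    172.21.15.10:/home/ubuntu/cephtest/mnt.0 \
+                    172.21.15.11:/home/ubuntu/cephtest/mnt.0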
+ """ + ips = [host for (host, _) in (remote.ssh.get_transport().getpeername() for (remote, roles) in ctx.cluster.remotes.items())] + for ip in ips: + args += [ '{ip}:{MNT}'.format(ip=ip, MNT=mnt) ] + + log.info('remote run {args}'.format(args=args)) + remote.run( args=args ) + + try: + yield + finally: + log.info('Unexporting nfs server...') + for id_, remote in clients: + log.debug('Unexporting client client.{id}...'.format(id=id_)) + mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) + try: + log.debug('Checking active files on mount {mnt}'.format(mnt=mnt)) + remote.run( + args=[ + 'sudo', + 'lsof', '-V', '+D', + '{mnt}'.format(mnt=mnt), + ], + check_status=False + ) + finally: + log.debug('Stopping NFS server on client.{id}...'.format(id=id_)) + remote.run( args=get_nfsd_args(remote, 'stop') ) + log.debug('Syncing client client.{id}'.format(id=id_)) + remote.run( + args=[ + 'sync' + ] + ) diff --git a/teuthology/task/localdir.py b/teuthology/task/localdir.py new file mode 100644 index 0000000000..8a84514651 --- /dev/null +++ b/teuthology/task/localdir.py @@ -0,0 +1,69 @@ +""" +Localdir +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Create a mount dir 'client' that is just the local disk: + + Example that "mounts" all clients: + + tasks: + - localdir: + - interactive: + + Example for a specific client: + + tasks: + - localdir: [client.2] + - interactive: + + :param ctx: Context + :param config: Configuration + """ + log.info('Creating local mnt dirs...') + + testdir = teuthology.get_testdir(ctx) + + if config is None: + config = list('client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, + 'client')) + + clients = list(teuthology.get_clients(ctx=ctx, roles=config)) + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + log.info('Creating dir {remote} {mnt}...'.format( + remote=remote, mnt=mnt)) + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + try: + yield + + finally: + log.info('Removing local mnt dirs...') + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + remote.run( + args=[ + 'rm', + '-rf', + '--', + mnt, + ], + ) diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py new file mode 100644 index 0000000000..63ff9f3b12 --- /dev/null +++ b/teuthology/task/lockfile.py @@ -0,0 +1,241 @@ +""" +Locking tests +""" +import logging +import os + +from teuthology.orchestra import run +from teuthology import misc as teuthology +import time +import gevent + + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + This task is designed to test locking. It runs an executable + for each lock attempt you specify, at 0.01 second intervals (to + preserve ordering of the locks). + You can also introduce longer intervals by setting an entry + as a number of seconds, rather than the lock dictionary. + The config is a list of dictionaries. For each entry in the list, you + must name the "client" to run on, the "file" to lock, and + the "holdtime" to hold the lock. + Optional entries are the "offset" and "length" of the lock. You can also specify a + "maxwait" timeout period which fails if the executable takes longer + to complete, and an "expectfail". 
+ + An example:: + + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - lockfile: + [{client:client.0, file:testfile, holdtime:10}, + {client:client.1, file:testfile, holdtime:0, maxwait:0, expectfail:true}, + {client:client.1, file:testfile, holdtime:0, maxwait:15, expectfail:false}, + 10, + {client: client.1, lockfile: testfile, holdtime: 5}, + {client: client.2, lockfile: testfile, holdtime: 5, maxwait: 1, expectfail: True}] + + + In the past this test would have failed; there was a bug where waitlocks weren't + cleaned up if the process failed. More involved scenarios are also possible. + + :param ctx: Context + :param config: Configuration + """ + log.info('Starting lockfile') + try: + assert isinstance(config, list), \ + "task lockfile got invalid config" + + log.info("building executable on each host") + buildprocs = list() + # build the locker executable on each client + clients = list() + files = list() + for op in config: + if not isinstance(op, dict): + continue + log.info("got an op") + log.info("op['client'] = %s", op['client']) + clients.append(op['client']) + files.append(op['lockfile']) + if not "expectfail" in op: + op["expectfail"] = False + badconfig = False + if not "client" in op: + badconfig = True + if not "lockfile" in op: + badconfig = True + if not "holdtime" in op: + badconfig = True + if badconfig: + raise KeyError("bad config {op_}".format(op_=op)) + + testdir = teuthology.get_testdir(ctx) + clients = set(clients) + files = set(files) + lock_procs = list() + for client in clients: + (client_remote,) = ctx.cluster.only(client).remotes.keys() + log.info("got a client remote") + (_, _, client_id) = client.partition('.') + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) + + proc = client_remote.run( + args=[ + 'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir), + run.Raw('&&'), + 'mkdir', '-p', '{tdir}/lockfile'.format(tdir=testdir), + run.Raw('&&'), + 'wget', + '-nv', + '--no-check-certificate', + 'https://raw.github.com/gregsfortytwo/FileLocker/main/sclockandhold.cpp', + '-O', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir), + run.Raw('&&'), + 'g++', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir), + '-o', '{tdir}/lockfile/sclockandhold'.format(tdir=testdir) + ], + logger=log.getChild('lockfile_client.{id}'.format(id=client_id)), + wait=False + ) + log.info('building sclockandhold on client{id}'.format(id=client_id)) + buildprocs.append(proc) + + # wait for builds to finish + run.wait(buildprocs) + log.info('finished building sclockandhold on all clients') + + # create the files to run these locks on + client = clients.pop() + clients.add(client) + (client_remote,) = ctx.cluster.only(client).remotes.keys() + (_, _, client_id) = client.partition('.') + file_procs = list() + for lockfile in files: + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile) + proc = client_remote.run( + args=[ + 'sudo', + 'touch', + filepath, + ], + logger=log.getChild('lockfile_createfile'), + wait=False + ) + file_procs.append(proc) + run.wait(file_procs) + file_procs = list() + for lockfile in files: + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile) + proc = client_remote.run( + args=[ + 'sudo', 'chown', 'ubuntu.ubuntu', filepath + ], + logger=log.getChild('lockfile_createfile'), + wait=False + ) + file_procs.append(proc) + run.wait(file_procs) + log.debug('created files to lock') + + # now actually run the locktests + for op in config: + if not isinstance(op, dict): 
+ assert isinstance(op, int) or isinstance(op, float) + log.info("sleeping for {sleep} seconds".format(sleep=op)) + time.sleep(op) + continue + greenlet = gevent.spawn(lock_one, op, ctx) + lock_procs.append((greenlet, op)) + time.sleep(0.1) # to provide proper ordering + #for op in config + + for (greenlet, op) in lock_procs: + log.debug('checking lock for op {op_}'.format(op_=op)) + result = greenlet.get() + if not result: + raise Exception("Got wrong result for op {op_}".format(op_=op)) + # for (greenlet, op) in lock_procs + + finally: + #cleanup! + if lock_procs: + for (greenlet, op) in lock_procs: + log.debug('closing proc for op {op_}'.format(op_=op)) + greenlet.kill(block=True) + + for client in clients: + (client_remote,) = ctx.cluster.only(client).remotes.keys() + (_, _, client_id) = client.partition('.') + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) + proc = client_remote.run( + args=[ + 'rm', '-rf', '{tdir}/lockfile'.format(tdir=testdir), + run.Raw(';'), + 'sudo', 'rm', '-rf', filepath + ], + wait=True + ) #proc + #done! +# task + +def lock_one(op, ctx): + """ + Perform the individual lock + """ + log.debug('spinning up locker with op={op_}'.format(op_=op)) + timeout = None + proc = None + result = None + (client_remote,) = ctx.cluster.only(op['client']).remotes.keys() + (_, _, client_id) = op['client'].partition('.') + testdir = teuthology.get_testdir(ctx) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) + + if "maxwait" in op: + timeout = gevent.Timeout(seconds=float(op["maxwait"])) + timeout.start() + try: + proc = client_remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + 'kill', + '{tdir}/lockfile/sclockandhold'.format(tdir=testdir), + filepath, + '{holdtime}'.format(holdtime=op["holdtime"]), + '{offset}'.format(offset=op.get("offset", '0')), + '{length}'.format(length=op.get("length", '1')), + ], + logger=log.getChild('lockfile_client.{id}'.format(id=client_id)), + wait=False, + stdin=run.PIPE, + check_status=False + ) + result = proc.wait() + except gevent.Timeout as tout: + if tout is not timeout: + raise + if bool(op["expectfail"]): + result = 1 + if result == 1: + if bool(op["expectfail"]): + log.info("failed as expected for op {op_}".format(op_=op)) + else: + raise Exception("Unexpectedly failed to lock {op_} within given timeout!".format(op_=op)) + finally: #clean up proc + if timeout is not None: + timeout.cancel() + if proc is not None: + proc.stdin.close() + + ret = (result == 0 and not bool(op["expectfail"])) or (result == 1 and bool(op["expectfail"])) + + return ret #we made it through diff --git a/teuthology/task/loop.py b/teuthology/task/loop.py new file mode 100644 index 0000000000..cd48df1cca --- /dev/null +++ b/teuthology/task/loop.py @@ -0,0 +1,45 @@ +""" +Task to loop a list of items +""" +import sys +import logging + +from teuthology import run_tasks + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Loop a sequential group of tasks + + example:: + + - loop: + count: 10 + body: + - tasktest: + - tasktest: + + :param ctx: Context + :param config: Configuration + """ + for i in range(config.get('count', 1)): + stack = [] + try: + for entry in config.get('body', []): + if not isinstance(entry, dict): + entry = ctx.config.get(entry, {}) + ((taskname, confg),) = entry.items() + log.info('In sequential, running task %s...' 
% taskname) + mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg) + if hasattr(mgr, '__enter__'): + mgr.__enter__() + stack.append(mgr) + finally: + try: + exc_info = sys.exc_info() + while stack: + mgr = stack.pop() + mgr.__exit__(*exc_info) + finally: + del exc_info diff --git a/teuthology/task/mpi.py b/teuthology/task/mpi.py new file mode 100644 index 0000000000..6c709fd171 --- /dev/null +++ b/teuthology/task/mpi.py @@ -0,0 +1,137 @@ +""" +Start mpi processes (and allow commands to be run inside process) +""" +import logging +import re + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +def _check_mpi_version(remotes): + """ + Retrieve the MPI version from each of `remotes` and raise an exception + if they are not all the same version. + """ + versions = set() + for remote in remotes: + version_str = remote.sh("mpiexec --version") + try: + version = re.search("^\s+Version:\s+(.+)$", version_str, re.MULTILINE).group(1) + except AttributeError: + raise RuntimeError("Malformed MPI version output: {0}".format(version_str)) + else: + versions.add(version) + + if len(versions) != 1: + raise RuntimeError("MPI version mismatch. Versions are: {0}".format(", ".join(versions))) + else: + log.info("MPI version {0}".format(list(versions)[0])) + + +def task(ctx, config): + """ + Setup MPI and execute commands + + Example that starts an MPI process on specific clients:: + + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - ssh_keys: + - mpi: + nodes: [client.0, client.1] + exec: ior ... + + Example that starts MPI processes on all clients:: + + tasks: + - ceph: + - ceph-fuse: + - ssh_keys: + - mpi: + exec: ior ... + + Example that starts MPI processes on all roles:: + + tasks: + - ceph: + - ssh_keys: + - mpi: + nodes: all + exec: ... 
+ + Example that specifies a working directory for MPI processes: + + tasks: + - ceph: + - ceph-fuse: + - pexec: + clients: + - ln -s {testdir}/mnt.* {testdir}/gmnt + - ssh_keys: + - mpi: + exec: fsx-mpi + workdir: {testdir}/gmnt + - pexec: + clients: + - rm -f {testdir}/gmnt + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict), 'task mpi got invalid config' + assert 'exec' in config, 'task mpi got invalid config, missing exec' + + testdir = teuthology.get_testdir(ctx) + + mpiexec = config['exec'].replace('$TESTDIR', testdir) + hosts = [] + remotes = [] + main_remote = None + if 'nodes' in config: + if isinstance(config['nodes'], str) and config['nodes'] == 'all': + for role in teuthology.all_roles(ctx.cluster): + (remote,) = ctx.cluster.only(role).remotes.keys() + ip,port = remote.ssh.get_transport().getpeername() + hosts.append(ip) + remotes.append(remote) + (main_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys() + elif isinstance(config['nodes'], list): + for role in config['nodes']: + (remote,) = ctx.cluster.only(role).remotes.keys() + ip,port = remote.ssh.get_transport().getpeername() + hosts.append(ip) + remotes.append(remote) + (main_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys() + else: + roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + (main_remote,) = ctx.cluster.only(roles[0]).remotes.keys() + for role in roles: + (remote,) = ctx.cluster.only(role).remotes.keys() + ip,port = remote.ssh.get_transport().getpeername() + hosts.append(ip) + remotes.append(remote) + + # mpich is sensitive to different versions on different nodes + _check_mpi_version(remotes) + + workdir = [] + if 'workdir' in config: + workdir = ['-wdir', config['workdir'].replace('$TESTDIR', testdir) ] + + log.info('mpi rank 0 is: {name}'.format(name=main_remote.name)) + + # write out the mpi hosts file + log.info('mpi nodes: [%s]' % (', '.join(hosts))) + teuthology.write_file(remote=main_remote, + path='{tdir}/mpi-hosts'.format(tdir=testdir), + data='\n'.join(hosts)) + log.info('mpiexec on {name}: {cmd}'.format(name=main_remote.name, cmd=mpiexec)) + args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)] + args.extend(workdir) + args.extend(mpiexec.split(' ')) + main_remote.run(args=args, ) + log.info('mpi task completed') + main_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)]) diff --git a/teuthology/task/nfs.py b/teuthology/task/nfs.py new file mode 100644 index 0000000000..5cd3aac81f --- /dev/null +++ b/teuthology/task/nfs.py @@ -0,0 +1,146 @@ +""" +Nfs client tester +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount nfs client (requires nfs server export like knfsd or ganesh) + + Example that mounts a single nfs client:: + + tasks: + - ceph: + - kclient: [client.0] + - knfsd: [client.0] + - nfs: + client.1: + server: client.0 + - interactive: + + Example that mounts multiple nfs clients with options:: + + tasks: + - ceph: + - kclient: [client.0, client.1] + - knfsd: [client.0, client.1] + - nfs: + client.2: + server: client.0 + options: [rw,hard,intr,nfsvers=3] + client.3: + server: client.1 + options: [ro] + - workunit: + clients: + client.2: + - suites/dbench.sh + client.3: + - suites/blogbench.sh + + It is not recommended that the nfs client and nfs server reside on the same node. 
So in the example above client.0-3 should be on 4 distinct + nodes. The client nfs testing would be using only client.2 and client.3. + """ + log.info('Mounting nfs clients...') + assert isinstance(config, dict) + + clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) + + testdir = teuthology.get_testdir(ctx) + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + client_config = config.get("client.%s" % id_) + if client_config is None: + client_config = {} + log.debug("Client client.%s config is %s" % (id_, client_config)) + + assert client_config.get('server') is not None + server = client_config.get('server'); + + svr_id = server[len('client.'):] + svr_mnt = os.path.join(testdir, 'mnt.{id}'.format(id=svr_id)) + + svr_remote = None + all_config = ['client.{id}'.format(id=tmpid) + for tmpid in teuthology.all_roles_of_type(ctx.cluster, 'client')] + all_clients = list(teuthology.get_clients(ctx=ctx, roles=all_config)) + for tmpid, tmpremote in all_clients: + if tmpid == svr_id: + svr_remote = tmpremote + break + + assert svr_remote is not None + svr_remote = svr_remote.name.split('@', 2)[1] + + if client_config.get('options') is not None: + opts = ','.join(client_config.get('options')) + else: + opts = 'rw' + + log.info('Mounting client.{id} from client.{sid}'.format(id=id_, sid=svr_id)) + log.debug('mount -o {opts} {remote}:{svr_mnt} {mnt}'.format( + remote=svr_remote, svr_mnt=svr_mnt, opts=opts, mnt=mnt)) + + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + remote.run( + args=[ + 'sudo', + "mount", + "-o", + opts, + '{remote}:{mnt}'.format(remote=svr_remote, mnt=svr_mnt), + mnt + ], + ) + + try: + yield + finally: + log.info('Unmounting nfs clients...') + for id_, remote in clients: + log.debug('Unmounting nfs client client.{id}...'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + try: + log.debug('First, syncing client client.{id}'.format(id=id_)) + remote.run( + args=[ + 'sync' + ] + ) + remote.run( + args=[ + 'sudo', + 'lsof', '-V', '+D', + '{mnt}'.format(mnt=mnt), + ], + check_status=False + ) + finally: + remote.run( + args=[ + 'sudo', + 'umount', + mnt, + ], + ) + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ], + ) diff --git a/teuthology/task/nop.py b/teuthology/task/nop.py new file mode 100644 index 0000000000..c7b181403f --- /dev/null +++ b/teuthology/task/nop.py @@ -0,0 +1,13 @@ +""" +Null task +""" +def task(ctx, config): + """ + This task does nothing. + + For example:: + + tasks: + - nop: + """ + pass diff --git a/teuthology/task/parallel.py b/teuthology/task/parallel.py new file mode 100644 index 0000000000..6999c0aae3 --- /dev/null +++ b/teuthology/task/parallel.py @@ -0,0 +1,71 @@ +""" +Task to group parallel running tasks +""" +import sys +import logging + +from teuthology import run_tasks +from teuthology import parallel + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Run a group of tasks in parallel. + + example:: + + - parallel: + - tasktest: + - tasktest: + + You can also define tasks in a top-level section outside of + 'tasks:', and reference them here. + + The referenced section must contain a list of tasks to run + sequentially, or a single task as a dict. 
The latter is only + available for backwards compatibility with existing suites:: + + tasks: + - parallel: + - tasktest: # task inline + - foo # reference to top-level 'foo' section + - bar # reference to top-level 'bar' section + foo: + - tasktest1: + - tasktest2: + bar: + tasktest: # note the list syntax from 'foo' is preferred + + That is, if the entry is not a dict, we will look it up in the top-level + config. + + Sequential tasks and Parallel tasks can be nested. + """ + + log.info('starting parallel...') + with parallel.parallel() as p: + for entry in config: + if not isinstance(entry, dict): + entry = ctx.config.get(entry, {}) + # support the usual list syntax for tasks + if isinstance(entry, list): + entry = dict(sequential=entry) + ((taskname, confg),) = entry.items() + p.spawn(_run_spawned, ctx, confg, taskname) + + +def _run_spawned(ctx, config, taskname): + """Run one of the tasks (this runs in parallel with others)""" + mgr = {} + try: + log.info('In parallel, running task %s...' % taskname) + mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=config) + if hasattr(mgr, '__enter__'): + mgr.__enter__() + finally: + exc_info = sys.exc_info() + if hasattr(mgr, '__exit__'): + mgr.__exit__(*exc_info) + del exc_info diff --git a/teuthology/task/parallel_example.py b/teuthology/task/parallel_example.py new file mode 100644 index 0000000000..eb9659a81d --- /dev/null +++ b/teuthology/task/parallel_example.py @@ -0,0 +1,58 @@ +""" +Parallel contextmanager test +""" +import contextlib +import logging + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def sequential_test(ctx, config): + """Example contextmanager that executes a command on remote hosts sequentially.""" + for role in config: + """Create a cluster composed of all hosts with the given role, and run the command on them sequentially.""" + log.info('Executing command on all hosts sequentially with role "%s"' % role) + ctx.cluster.only(role).run(args=['sleep', '5', run.Raw(';'), 'date', run.Raw(';'), 'hostname']) + yield + +@contextlib.contextmanager +def parallel_test(ctx, config): + """Example contextmanager that executes a command on remote hosts in parallel.""" + for role in config: + """Create a cluster composed of all hosts with the given role, and run the command on them concurrently.""" + log.info('Executing command on all hosts concurrently with role "%s"' % role) + cluster = ctx.cluster.only(role) + nodes = {} + for remote in cluster.remotes.keys(): + """Call run for each remote host, but use 'wait=False' to have it return immediately.""" + proc = remote.run(args=['sleep', '5', run.Raw(';'), 'date', run.Raw(';'), 'hostname'], wait=False,) + nodes[remote.name] = proc + for name, proc in nodes.items(): + """Wait for each process to finish before yielding and allowing other contextmanagers to run.""" + proc.wait() + yield + +@contextlib.contextmanager +def task(ctx, config): + """This is the main body of the task that gets run.""" + + """Take car of some yaml parsing here""" + if config is not None and not isinstance(config, list) and not isinstance(config, dict): + assert(False), "task parallel_example only supports a list or dictionary for configuration" + if config is None: + config = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + """Run 
Multiple contextmanagers sequentially by nesting them.""" + with contextutil.nested( + lambda: parallel_test(ctx=ctx, config=clients), + lambda: sequential_test(ctx=ctx, config=clients), + ): + yield diff --git a/teuthology/task/pcp.j2 b/teuthology/task/pcp.j2 new file mode 100644 index 0000000000..fe82611888 --- /dev/null +++ b/teuthology/task/pcp.j2 @@ -0,0 +1,15 @@ + + +{% if job_id %}job {{ job_id }} {% endif %}performance data + +{% for metric in graphs.keys() %} +{% if mode == 'static' %} +{% set url = graphs[metric].file.split('/')[-1] %} +{% else %} +{% set url = graphs[metric].url %} +{% endif %} +