python_version: "python3"
nsupdate_web_repo: "https://github.com/ceph/nsupdate-web.git"
nsupdate_web_path: "/home/{{ nsupdate_web_user }}/nsupdate_web"
-nsupdate_web_branch: "master"
+nsupdate_web_branch: "main"
# The public and private keys must be manually placed on the host;
# The pubkey name must be provided - most likely via group_vars
pubkey_name: "your_pubkey.key"
paddles_container_replicas: 10
paddles_repo: https://github.com/ceph/paddles.git
-paddles_branch: master
+paddles_branch: main
log_host: localhost
dest: /etc/logrotate.d/download.ceph.com
# Used for pushing upstream builds
-# https://github.com/ceph/ceph-build/blob/master/scripts/sync-push
+# https://github.com/ceph/ceph-build/blob/main/scripts/sync-push
- name: Add signer user
user:
name: signer
pulpito_repo: https://github.com/ceph/pulpito.git
pulpito_user: pulpito
-pulpito_branch: master
+pulpito_branch: main
The functions in this role are:
-**rook-jenkins-update:** For updating rook jenkins version to the version defined in the "jenkins_master_image" variable
+**rook-jenkins-update:** For updating rook jenkins version to the version defined in the "jenkins_controller_image" variable
**rook-os-update:** For updating rook jenkins OS packages
Updating the rook jenkins app to version 2.289.1::
- ansible-playbook rook.yml --tags="rook-jenkins-update" --extra-vars="jenkins_master_image=jenkins/jenkins:2.289.1"
+ ansible-playbook rook.yml --tags="rook-jenkins-update" --extra-vars="jenkins_controller_image=jenkins/jenkins:2.289.1"
Updating the rook jenkins OS packages::
The rook jenkins version::
- jenkins_master_image: jenkins/jenkins:2.289.1
+ jenkins_controller_image: jenkins/jenkins:2.289.1
The rook jenkins ssh key-pair defined in the aws dashboard::
The rook jenkins instance type::
- master_instance_type: m4.large
+ controller_instance_type: m4.large
The rook jenkins instance aws security group::
The rook jenkins running aws instance name::
- master_name: Prod-Jenkins
+ controller_name: Prod-Jenkins
The rook jenkins instance ssh key::
Available tags are listed below:
- rook-jenkins-update
- Update the rook jenkins app to the version defined in the "jenkins_master_image" variable.
+ Update the rook jenkins app to the version defined in the "jenkins_controller_image" variable.
- rook-os-update
Update the rook jenkins OS packages.
- name: Gather facts
ec2_instance_facts:
filters:
- "tag:Name": "{{ master_name }}"
+ "tag:Name": "{{ controller_name }}"
instance-state-name: running
- register: master_metadata
+ register: controller_metadata
- name: Take a backup image of the Prod-Jenkins instance
ec2_ami:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
- instance_id: "{{ master_metadata.instances[0].instance_id }}"
+ instance_id: "{{ controller_metadata.instances[0].instance_id }}"
no_reboot: yes
wait: yes
wait_timeout: 3000
- name: "{{ master_name }}-{{ ansible_date_time.date }}"
+ name: "{{ controller_name }}-{{ ansible_date_time.date }}"
tags:
- Name: "{{ master_name }}-{{ ansible_date_time.date }}"
+ Name: "{{ controller_name }}-{{ ansible_date_time.date }}"
- name: Check if container is running
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" docker ps -a | grep -i jenkins | wc -l
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" docker ps -a | grep -i jenkins | wc -l
register: container
- name: Kill the jenkins container
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo docker kill jenkins
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo docker kill jenkins
when: container.stdout == '1'
- name: Remove the jenkins container
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo docker rm jenkins
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo docker rm jenkins
when: container.stdout == '1'
- name: Start the new jenkins container with the new LTS version
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_master_image }}"
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_controller_image }}"
- name: Gather facts
ec2_instance_facts:
filters:
- "tag:Name": "{{ master_name }}"
+ "tag:Name": "{{ controller_name }}"
instance-state-name: running
- register: master_metadata
+ register: controller_metadata
-- name: Take a image of the master
+- name: Take an image of the controller
ec2_ami:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
- instance_id: "{{ master_metadata.instances[0].instance_id }}"
+ instance_id: "{{ controller_metadata.instances[0].instance_id }}"
no_reboot: yes
wait: yes
wait_timeout: 3000
- name: "{{ master_name }}-{{ ansible_date_time.date }}"
+ name: "{{ controller_name }}-{{ ansible_date_time.date }}"
tags:
- Name: "{{ master_name }}-{{ ansible_date_time.date }}"
+ Name: "{{ controller_name }}-{{ ansible_date_time.date }}"
- name: Update apt cache
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo apt-get update
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo apt-get update
- name: Update packages
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo apt-get upgrade -y
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo apt-get upgrade -y
- name: Check if system requires reboot
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" [ -f /var/run/reboot-required ]; echo $?
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" [ -f /var/run/reboot-required ]; echo $?
register: reboot
- name: Reboot if required
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo reboot
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo reboot
ignore_errors: yes
when: reboot.stdout == '0'
- name: Wait for SSH to come up
- wait_for: host={{ master_metadata.instances[0].public_dns_name }} port=22 delay=60 timeout=320 state=started
+ wait_for: host={{ controller_metadata.instances[0].public_dns_name }} port=22 delay=60 timeout=320 state=started
when: reboot.stdout == '0'
- name: Check if old container exist
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" docker ps -a | grep -i jenkins | wc -l
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" docker ps -a | grep -i jenkins | wc -l
register: container
- name: Remove jenkins old container if exist
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo docker rm jenkins
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo docker rm jenkins
when:
- container.stdout == '1'
- reboot.stdout == '0'
- name: Start jenkins container
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ master_metadata.instances[0].public_dns_name }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_master_image }}"
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ controller_metadata.instances[0].public_dns_name }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_controller_image }}"
when:
- container.stdout == '1'
- reboot.stdout == '0'
aws_secret_key: "{{ aws_secret_key }}"
key_name: "{{ keypair }}"
group: "{{ security_group }}"
- instance_type: "{{ master_instance_type }}"
+ instance_type: "{{ controller_instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
label: "{{ item.id }} - {{ item.public_ip }}"
- name: Start jenkins container
- command: ssh -i "{{ rook_key }}" ubuntu@"{{ item.public_ip }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_master_image }}"
+ command: ssh -i "{{ rook_key }}" ubuntu@"{{ item.public_ip }}" sudo docker run -d --name jenkins -p 8080:8080 -p 50000:50000 -v /mnt/jenkins/jenkins:/var/jenkins_home "{{ jenkins_controller_image }}"
with_items: '{{ ec2_instances.instances }}'
loop_control:
label: "{{ item.id }} - {{ item.public_ip }}"
---
-jenkins_master_image: jenkins/jenkins:2.289.1
+jenkins_controller_image: jenkins/jenkins:2.289.1
keypair: root-jenkins-new-key
-master_instance_type: m4.large
+controller_instance_type: m4.large
security_group: rook-jenkins-group
image: ami-0aaf5dbaa4cbe5771
region: us-east-1
aws_tags:
Name: "{{ instance_name }}"
Application: "Jenkins"
-master_name: Prod-Jenkins
+controller_name: Prod-Jenkins
rook_key: "{{ secrets_path | mandatory }}/rook_key.yml"
- Install dependencies required for ``teuthology``
- Create the ``teuthology`` and ``teuthworker`` users which are used for
scheduling and executing tests, respectively
-- Clone ``teuthology`` repos into ``~/src/teuthology_master`` under those user accounts
+- Clone ``teuthology`` repos into ``~/src/teuthology_main`` under those user accounts
- Run ``teuthology``'s ``bootstrap`` script
- Manages user accounts and sudo privileges using the ``test_admins`` group_var in the secrets repo
- Includes a script to keep the ``teuthology`` user's crontab up to date with remote version-controlled versions (``--tags="crontab"``)
- "{{ teuthology_execution_user }}"
teuthology_repo: https://github.com/ceph/teuthology.git
-teuthology_branch: "master"
+teuthology_branch: "main"
teuthology_yaml_extra: ""
teuthology_ceph_git_base_url: "git://git.ceph.com/"
archive_base: "/home/{{ teuthology_execution_user }}/archive"
-remote_crontab_url: "https://raw.githubusercontent.com/ceph/ceph/master/qa/crontab/teuthology-cronjobs"
+remote_crontab_url: "https://raw.githubusercontent.com/ceph/ceph/main/qa/crontab/teuthology-cronjobs"
- name: Clone the teuthology repo for GitHub PR
git:
repo: "https://github.com/ceph/teuthology"
- dest: /home/{{ item }}/src/teuthology_master
+ dest: /home/{{ item }}/src/teuthology_main
version: "{{ teuthology_branch }}"
refspec: '+refs/pull/{{ teuthology_ghpr }}/*:refs/origin/pr/{{ teuthology_ghpr }}/*'
become_user: "{{ item }}"
- name: Clone the teuthology repo
git:
repo: "{{ teuthology_repo }}"
- dest: /home/{{ item }}/src/teuthology_master
+ dest: /home/{{ item }}/src/teuthology_main
version: "{{ teuthology_branch }}"
become_user: "{{ item }}"
with_items: "{{ teuthology_users }}"
- name: Run bootstrap
shell: NO_CLOBBER=true ./bootstrap
args:
- chdir: /home/{{ item }}/src/teuthology_master/
+ chdir: /home/{{ item }}/src/teuthology_main/
become_user: "{{ item }}"
with_items: "{{ teuthology_users }}"
register: bootstrap
- name: Add teuthology scripts to PATH
lineinfile:
dest: /home/{{ item }}/.profile
- regexp: teuthology_master
- line: 'PATH="$HOME/src/teuthology_master/virtualenv/bin:$PATH"'
+ regexp: teuthology_main
+ line: 'PATH="$HOME/src/teuthology_main/virtualenv/bin:$PATH"'
become_user: "{{ item }}"
with_items: "{{ teuthology_users }}"
- name: Ensure teuthology is usable
shell: "./teuthology --version"
args:
- chdir: /home/{{ item }}/src/teuthology_master/virtualenv/bin/
+ chdir: /home/{{ item }}/src/teuthology_main/virtualenv/bin/
become_user: "{{ item }}"
with_items: "{{ teuthology_users }}"
changed_when: false
user=${TEUTHOLOGY_USERNAME:-"{{ teuthology_execution_user }}"}
export HOME=/home/$user
-export WORKER_HOME=$HOME/src/teuthology_master
+export WORKER_HOME=$HOME/src/teuthology_main
#/usr/share/nginx/html
export WORKER_ARCH=$HOME/archive
# specified.
keys_repo: "https://github.com/ceph/keys"
# Branch of above repo to use
-keys_branch: master
+keys_branch: main
# Where to clone keys_repo on the *local* disk
keys_repo_path: "~/.cache/src/keys"