Diffstat (limited to 'playbooks')
7 files changed, 329 insertions, 21 deletions
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 1c8d99341..0ba11a21b 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,4 +1,6 @@
 ---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
 - hosts: localhost
   connection: local
   become: no
@@ -8,7 +10,7 @@
     - add_host:
         name: "{{ item }}"
         groups: l_oo_all_hosts
-      with_items: g_all_hosts
+      with_items: "{{ g_all_hosts | default([]) }}"
 
 - hosts: l_oo_all_hosts
   gather_facts: no
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
new file mode 100644
index 000000000..6d1247e0f
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -0,0 +1,22 @@
+---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
+- hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+    - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+    - add_host:
+        name: "{{ item }}"
+        groups: l_oo_all_hosts
+      with_items: "{{ g_all_hosts | default([]) }}"
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+    - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../common/openshift-cluster/redeploy-certificates.yml
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
new file mode 100644
index 000000000..b97906072
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -0,0 +1,245 @@
+---
+- include: evaluate_groups.yml
+
+- include: initialize_facts.yml
+
+- include: initialize_openshift_version.yml
+
+- name: Load openshift_facts
+  hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
+  roles:
+  - openshift_facts
+
+- name: Redeploy etcd certificates
+  hosts: oo_etcd_to_config
+  any_errors_fatal: true
+  vars:
+    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+    etcd_conf_dir: /etc/etcd
+    etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+
+  pre_tasks:
+  - stat:
+      path: "{{ etcd_generated_certs_dir }}"
+    register: etcd_generated_certs_dir_stat
+  - name: Backup etcd certificates
+    command: >
+      tar -czvf /etc/etcd/etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+      {{ etcd_conf_dir }}/ca.crt
+      {{ etcd_conf_dir }}/ca
+      {{ etcd_generated_certs_dir }}
+    when: etcd_generated_certs_dir_stat.stat.exists
+    delegate_to: "{{ etcd_ca_host }}"
+    run_once: true
+  - name: Remove existing etcd certificates
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+    - "{{ etcd_conf_dir }}/ca.crt"
+    - "{{ etcd_conf_dir }}/ca"
+    - "{{ etcd_generated_certs_dir }}"
+  roles:
+  - role: openshift_etcd_server_certificates
+    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+    etcd_certificates_redeploy: true
+
+- name: Redeploy master certificates
+  hosts: oo_masters_to_config
+  any_errors_fatal: true
+  vars:
+    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+    openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+  pre_tasks:
+  - stat:
+      path: "{{ openshift_generated_configs_dir }}"
+    register: openshift_generated_configs_dir_stat
+  - name: Backup generated certificate and config directories
+    command: >
+      tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
+      {{ openshift_generated_configs_dir }}
+      {{ openshift.common.config_base }}/master
+    when: openshift_generated_configs_dir_stat.stat.exists
+    delegate_to: "{{ openshift_ca_host }}"
+    run_once: true
+  - name: Remove generated certificate directories
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+    - "{{ openshift_generated_configs_dir }}"
+  - name: Remove generated certificates
+    file:
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
+      state: absent
+    with_items:
+    - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false) }}"
+    - "etcd.server.crt"
+    - "etcd.server.key"
+    - "master.etcd-client.crt"
+    - "master.etcd-client.key"
+    - "master.server.crt"
+    - "master.server.key"
+    - "openshift-master.crt"
+    - "openshift-master.key"
+    - "openshift-master.kubeconfig"
+  - name: Remove CA certificate
+    file:
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
+      state: absent
+    when: openshift_certificates_redeploy_ca | default(false) | bool
+    with_items:
+    - "ca.crt"
+    - "ca.key"
+    - "ca.serial.txt"
+    - "ca-bundle.crt"
+  roles:
+  - role: openshift_master_certificates
+    openshift_master_etcd_hosts: "{{ hostvars
+                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+                                     | oo_collect('openshift.common.hostname')
+                                     | default(none, true) }}"
+    openshift_master_hostnames: "{{ hostvars
+                                    | oo_select_keys(groups['oo_masters_to_config'] | default([]))
+                                    | oo_collect('openshift.common.all_hostnames')
+                                    | oo_flatten | unique }}"
+    openshift_certificates_redeploy: true
+  - role: openshift_etcd_client_certificates
+    etcd_certificates_redeploy: true
+    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+    etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+    etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+    etcd_cert_prefix: "master.etcd-"
+    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+
+- name: Redeploy node certificates
+  hosts: oo_nodes_to_config
+  any_errors_fatal: true
+  pre_tasks:
+  - name: Remove CA certificate
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+    - "{{ openshift.common.config_base }}/node/ca.crt"
+  roles:
+  - role: openshift_node_certificates
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+    openshift_certificates_redeploy: true
+
+- name: Restart etcd
+  hosts: oo_etcd_to_config
+  tasks:
+  - name: restart etcd
+    service: name=etcd state=restarted
+
+- name: Stop master services
+  hosts: oo_masters_to_config
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: stop master
+    service: name={{ openshift.common.service_type }}-master state=stopped
+    when: not openshift_master_ha | bool
+  - name: stop master api
+    service: name={{ openshift.common.service_type }}-master-api state=stopped
+    when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+  - name: stop master controllers
+    service: name={{ openshift.common.service_type }}-master-controllers state=stopped
+    when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+
+- name: Start master services
+  hosts: oo_masters_to_config
+  serial: 1
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: start master
+    service: name={{ openshift.common.service_type }}-master state=started
+    when: not openshift_master_ha | bool
+  - name: start master api
+    service: name={{ openshift.common.service_type }}-master-api state=started
+    when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+  - name: start master controllers
+    service: name={{ openshift.common.service_type }}-master-controllers state=started
+    when: openshift_master_ha | bool and openshift_master_cluster_method == 'native'
+
+- name: Restart masters (pacemaker)
+  hosts: oo_first_master
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: restart master
+    command: pcs resource restart master
+    when: openshift_master_ha | bool and openshift_master_cluster_method == 'pacemaker'
+
+- name: Restart nodes
+  hosts: oo_nodes_to_config
+  tasks:
+  - name: restart node
+    service: name={{ openshift.common.service_type }}-node state=restarted
+
+- name: Copy admin client config(s)
+  hosts: oo_first_master
+  tasks:
+  - name: Create temp directory for kubeconfig
+    command: mktemp -d /tmp/openshift-ansible-XXXXXX
+    register: mktemp
+    changed_when: False
+
+  - name: Copy admin client config(s)
+    command: >
+      cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+    changed_when: False
+
+- name: Serially evacuate all nodes to trigger redeployments
+  hosts: oo_nodes_to_config
+  serial: 1
+  any_errors_fatal: true
+  tasks:
+  - name: Determine if node is currently scheduleable
+    command: >
+      {{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      get node {{ openshift.common.hostname | lower }} -o json
+    register: node_output
+    when: openshift_certificates_redeploy_ca | default(false) | bool
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    changed_when: false
+
+  - set_fact:
+      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+    when: openshift_certificates_redeploy_ca | default(false) | bool
+
+  - name: Prepare for node evacuation
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      manage-node {{ openshift.common.hostname | lower }}
+      --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+  - name: Evacuate node
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      manage-node {{ openshift.common.hostname | lower }}
+      --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
+
+- name: Delete temporary directory
+  hosts: oo_first_master
+  tasks:
+  - name: Delete temp directory
+    file:
+      name: "{{ mktemp.stdout }}"
+      state: absent
+    changed_when: False
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 20d66522f..03e7b844c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -13,14 +13,32 @@
   failed_when: false
   when: openshift.common.is_containerized | bool
 
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+
+- debug: var=docker_image_count.stdout
+
 - name: Remove all containers and images
   script: nuke_images.sh docker
   register: nuke_images_result
   when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
 
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- service: name=docker state=stopped
+
 - name: Upgrade Docker
   action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
 
+- service: name=docker state=started
+
 - name: Restart containerized services
   service: name={{ item }} state=started
   with_items:
diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
index 6b155f7fa..8635eab0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
@@ -15,9 +15,11 @@ then
 fi
 
 # Delete all images (forcefully)
-image_ids=`docker images -q`
+image_ids=`docker images -aq`
 if test -n "$image_ids"
 then
-    # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
-    docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge."
+    # Some layers are deleted recursively and are no longer present
+    # when docker goes to remove them:
+    docker rmi -f `docker images -aq` || true
 fi
+
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
index b5fbc4af6..42a24eaf8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -185,10 +185,12 @@
 - name: Verify docker upgrade targets
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   tasks:
-  - name: Determine available Docker
-    script: ../files/rpm_versions.sh docker
-    register: g_docker_version_result
-    when: not openshift.common.is_atomic | bool
+  # Only check if docker upgrade is required if docker_upgrade is not
+  # already set to False.
+  - include: docker/upgrade_check.yml
+    when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  # Additional checks for Atomic hosts:
 
   - name: Determine available Docker
     shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
@@ -196,18 +198,12 @@
     when: openshift.common.is_atomic | bool
 
   - set_fact:
-      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+      l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
     when: openshift.common.is_atomic | bool
 
   - fail:
       msg: This playbook requires access to Docker 1.10 or later
-    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
-
-  # TODO: add check to upgrade ostree to get latest Docker
+    when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
 
 - set_fact:
     pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index dee086cf5..3ec47d6f3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -3,6 +3,34 @@
 # The restart playbook should be run after this playbook completes.
 ###############################################################################
 
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  - name: Check Docker image count
+    shell: "docker images -aq | wc -l"
+    register: docker_image_count
+    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+  - debug: var=docker_image_count.stdout
+    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+  - name: Remove unused Docker images for Docker 1.10+ migration
+    shell: "docker rmi `docker images -aq`"
+    # Will fail on images still in use:
+    failed_when: false
+    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+  - name: Check Docker image count
+    shell: "docker images -aq | wc -l"
+    register: docker_image_count
+    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+  - debug: var=docker_image_count.stdout
+    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
@@ -111,11 +139,6 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config
 
-  # Only check if docker upgrade is required if docker_upgrade is not
-  # already set to False.
-  - include: docker/upgrade_check.yml
-    when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
 
   - include: "{{ node_config_hook }}"
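
A minimal usage sketch for the new entry point added above. The playbook path and the openshift_certificates_redeploy_ca switch come from this diff; the inventory location /etc/ansible/hosts is an assumption and should be replaced with your own BYO inventory:

    # Redeploy cluster certificates; pass openshift_certificates_redeploy_ca=true
    # only when the CA should also be regenerated (this triggers the serialized
    # node evacuation plays shown above). The inventory path is an assumption.
    ansible-playbook -i /etc/ansible/hosts \
        playbooks/byo/openshift-cluster/redeploy-certificates.yml \
        -e openshift_certificates_redeploy_ca=true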