diff options
Diffstat (limited to 'playbooks/common')
28 files changed, 1401 insertions, 149 deletions
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 6aac70f63..17a177644 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,33 +5,40 @@    become: no    gather_facts: no    tasks: -  - fail: +  - name: Evaluate groups - g_etcd_hosts required +    fail:        msg: This playbook requires g_etcd_hosts to be set -    when: "{{ g_etcd_hosts is not defined }}" +    when: g_etcd_hosts is not defined -  - fail: +  - name: Evaluate groups - g_master_hosts or g_new_master_hosts required +    fail:        msg: This playbook requires g_master_hosts or g_new_master_hosts to be set -    when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}" +    when: g_master_hosts is not defined or g_new_master_hosts is not defined -  - fail: +  - name: Evaluate groups - g_node_hosts or g_new_node_hosts required +    fail:        msg: This playbook requires g_node_hosts or g_new_node_hosts to be set -    when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}" +    when: g_node_hosts is not defined or g_new_node_hosts is not defined -  - fail: +  - name: Evaluate groups - g_lb_hosts required +    fail:        msg: This playbook requires g_lb_hosts to be set -    when: "{{ g_lb_hosts is not defined }}" +    when: g_lb_hosts is not defined -  - fail: +  - name: Evaluate groups - g_nfs_hosts required +    fail:        msg: This playbook requires g_nfs_hosts to be set -    when: "{{ g_nfs_hosts is not defined }}" +    when: g_nfs_hosts is not defined -  - fail: +  - name: Evaluate groups - g_nfs_hosts is single host +    fail:        msg: The nfs group must be limited to one host -    when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" +    when: (groups[g_nfs_hosts] | default([])) | length > 1 -  - fail: +  - name: Evaluate groups - g_glusterfs_hosts required +    fail:        
msg: This playbook requires g_glusterfs_hosts to be set -    when: "{{ g_glusterfs_hosts is not defined }}" +    when: g_glusterfs_hosts is not defined    - name: Evaluate oo_all_hosts      add_host: @@ -51,13 +58,13 @@      with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"      changed_when: no -  - name: Evaluate oo_etcd_to_config +  - name: Evaluate oo_first_master      add_host: -      name: "{{ item }}" -      groups: oo_etcd_to_config +      name: "{{ g_master_hosts[0] }}" +      groups: oo_first_master        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ g_etcd_hosts | default([]) }}" +    when: g_master_hosts|length > 0      changed_when: no    - name: Evaluate oo_masters_to_config @@ -69,41 +76,59 @@      with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"      changed_when: no -  - name: Evaluate oo_nodes_to_config +  - name: Evaluate oo_etcd_to_config      add_host:        name: "{{ item }}" -      groups: oo_nodes_to_config +      groups: oo_etcd_to_config        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" +    with_items: "{{ g_etcd_hosts | default([]) }}"      changed_when: no -  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is -  - name: Add master to oo_nodes_to_config +  - name: Evaluate oo_first_etcd      add_host: -      name: "{{ item }}" -      groups: oo_nodes_to_config +      name: "{{ g_etcd_hosts[0] }}" +      groups: oo_first_etcd        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ g_master_hosts | default([]) }}" -    when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}" +    when: 
g_etcd_hosts|length > 0      changed_when: no -  - name: Evaluate oo_first_etcd +  # We use two groups one for hosts we're upgrading which doesn't include embedded etcd +  # The other for backing up which includes the embedded etcd host, there's no need to +  # upgrade embedded etcd that just happens when the master is updated. +  - name: Evaluate oo_etcd_hosts_to_upgrade      add_host: -      name: "{{ g_etcd_hosts[0] }}" -      groups: oo_first_etcd +      name: "{{ item }}" +      groups: oo_etcd_hosts_to_upgrade +    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" +    changed_when: False + +  - name: Evaluate oo_etcd_hosts_to_backup +    add_host: +      name: "{{ item }}" +      groups: oo_etcd_hosts_to_backup +    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" +    changed_when: False + +  - name: Evaluate oo_nodes_to_config +    add_host: +      name: "{{ item }}" +      groups: oo_nodes_to_config        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" -    when: "{{ g_etcd_hosts|length > 0 }}" +      ansible_become: "{{ g_sudo | default(omit) }}" +    with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"      changed_when: no -  - name: Evaluate oo_first_master +  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is +  - name: Add master to oo_nodes_to_config      add_host: -      name: "{{ g_master_hosts[0] }}" -      groups: oo_first_master +      name: "{{ item }}" +      groups: oo_nodes_to_config        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    when: "{{ g_master_hosts|length > 0 }}" +    with_items: "{{ g_master_hosts | default([]) }}" +    when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool      
changed_when: no    - name: Evaluate oo_lb_to_config diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 07b38920f..f4e52869e 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,13 +1,14 @@  ---  # NOTE: requires openshift_facts be run  - name: Verify compatible yum/subscription-manager combination -  hosts: l_oo_all_hosts +  hosts: oo_all_hosts    gather_facts: no    tasks:    # See:    #   https://bugzilla.redhat.com/show_bug.cgi?id=1395047    #   https://bugzilla.redhat.com/show_bug.cgi?id=1282961    #   https://github.com/openshift/openshift-ansible/issues/1138 +  #   Consider the repoquery module for this work    - name: Check for bad combinations of yum and subscription-manager      command: >        {{ repoquery_cmd }} --installed --qf '%{version}' "yum" @@ -16,7 +17,7 @@      when: not openshift.common.is_atomic | bool    - fail:        msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. -    when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" +    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' 
in yum_ver_test.stdout  - name: Determine openshift_version to configure on first master    hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml index a30952929..02042c1ef 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml @@ -3,15 +3,10 @@    hosts: oo_masters_to_config:oo_nodes_to_config    gather_facts: no    tasks: -  - include: pre/validate_excluder.yml -    vars: -      excluder: "{{ openshift.common.service_type }}-docker-excluder" -    when: enable_docker_excluder | default(enable_excluders) | default(True) | bool -  - include: pre/validate_excluder.yml -    vars: -      excluder: "{{ openshift.common.service_type }}-excluder" -    when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool - +  # verify the excluders can be upgraded +  - include_role: +      name: openshift_excluder +      tasks_from: verify_upgrade    # disable excluders based on their status    - include_role: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index fb51a0061..9d0333ca8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,6 +1,6 @@  ---  - name: Backup etcd -  hosts: etcd_hosts_to_backup +  hosts: oo_etcd_hosts_to_backup    vars:      embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"      etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}" @@ -87,10 +87,10 @@    tasks:    - set_fact:        etcd_backup_completed: "{{ hostvars -                                 | 
oo_select_keys(groups.etcd_hosts_to_backup) +                                 | oo_select_keys(groups.oo_etcd_hosts_to_backup)                                   | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"    - set_fact: -      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" +      etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"    - fail:        msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"      when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..d9b59edcb 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,32 +5,6 @@  # mirrored packages on your own because only the GA and latest versions are  # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml -  tags: -  - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. 
-- name: Evaluate additional groups for etcd -  hosts: localhost -  connection: local -  become: no -  tasks: -  - name: Evaluate etcd_hosts_to_upgrade -    add_host: -      name: "{{ item }}" -      groups: etcd_hosts_to_upgrade -    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" -    changed_when: False - -  - name: Evaluate etcd_hosts_to_backup -    add_host: -      name: "{{ item }}" -      groups: etcd_hosts_to_backup -    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" -    changed_when: False -  - name: Backup etcd before upgrading anything    include: backup.yml    vars: @@ -38,9 +12,11 @@    when: openshift_etcd_backup | default(true) | bool  - name: Drop etcdctl profiles -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    tasks: -  - include: roles/etcd/tasks/etcdctl.yml +  - include_role: +      name: etcd_common +      tasks_from: etcdctl.yml  - name: Perform etcd upgrade    include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..45e301315 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,6 +1,6 @@  ---  - name: Determine etcd version -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    tasks:    - name: Record RPM based etcd version      command: rpm -qa --qf '%{version}' etcd\* @@ -43,7 +43,7 @@  # I really dislike this copy/pasta but I wasn't able to find a way to get it to loop  # through hosts, then loop through tasks only when appropriate  - name: Upgrade to 2.1 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: '2.1' @@ -52,7 +52,7 @@      when: 
etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool  - name: Upgrade RPM hosts to 2.2 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: '2.2' @@ -61,7 +61,7 @@      when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool  - name: Upgrade containerized hosts to 2.2.5 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: 2.2.5 @@ -70,7 +70,7 @@      when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool  - name: Upgrade RPM hosts to 2.3 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: '2.3' @@ -79,7 +79,7 @@      when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool  - name: Upgrade containerized hosts to 2.3.7 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: 2.3.7 @@ -88,7 +88,7 @@      when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool  - name: Upgrade RPM hosts to 3.0 -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: '3.0' @@ -97,7 +97,7 @@      when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool  - name: Upgrade containerized hosts to etcd3 image -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    vars:      upgrade_version: 3.0.15 @@ -106,7 +106,7 @@      when: 
etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool  - name: Upgrade fedora to latest -  hosts: etcd_hosts_to_upgrade +  hosts: oo_etcd_hosts_to_upgrade    serial: 1    tasks:    - include: fedora_tasks.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index cbf6d58b3..0f421928b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,17 +10,6 @@  - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - name: Clean package cache -    command: "{{ ansible_pkg_mgr }} clean all" -    when: not openshift.common.is_atomic | bool -    args: -      warn: no -  - name: Ensure firewall is not switched during upgrade    hosts: oo_all_hosts    tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml deleted file mode 100644 index 6de1ed061..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# input variables: -# - repoquery_cmd -# - excluder -# - openshift_upgrade_target -- block: -  - name: Get available excluder version -    command: > -      {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" -    register: excluder_version -    failed_when: false -    changed_when: false - -  - name: Docker excluder version detected -    debug: -      msg: "{{ excluder }}: {{ excluder_version.stdout }}" - -  - name: Printing upgrade target version -    debug: -      msg: "{{ openshift_upgrade_target }}" - -  - name: Check the available {{ excluder }} version is at most of the upgrade target version -    fail: -      msg: "Available {{ excluder }} version {{ 
excluder_version.stdout }} is higher than the upgrade target version" -    when: -    - "{{ excluder_version.stdout != '' }}" -    - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" -  when: -  - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c6e799261..0ad934d2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,6 @@  ###############################################################################  # Upgrade Masters  ############################################################################### -- name: Evaluate additional groups for upgrade -  hosts: localhost -  connection: local -  become: no -  tasks: -  - name: Evaluate etcd_hosts_to_backup -    add_host: -      name: "{{ item }}" -      groups: etcd_hosts_to_backup -    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" -    changed_when: False  # If facts cache were for some reason deleted, this fact may not be set, and if not set  # it will always default to true. 
This causes problems for the etcd data dir fact detection diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 88f2ddc78..83d2cec81 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -63,12 +63,12 @@    - block:      - debug:          msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" -      when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}" +      when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]      - debug:          msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" -      when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}" -    when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}" +      when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates +    when: openshift_master_scheduler_predicates | default(none) is not none    # Handle cases where openshift_master_predicates is not defined    - block: @@ -87,7 +87,7 @@        when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and                  openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" -    when: "{{ openshift_master_scheduler_predicates | default(none) is none }}" +    when: openshift_master_scheduler_predicates | default(none) is none  # Upgrade priorities @@ -120,12 +120,12 @@    - block:   
   - debug:          msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" -      when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}" +      when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]      - debug:          msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" -      when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}" -    when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}" +      when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities +    when: openshift_master_scheduler_priorities | default(none) is not none    # Handle cases where openshift_master_priorities is not defined    - block: @@ -144,7 +144,7 @@        when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and                  openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" -    when: "{{ openshift_master_scheduler_priorities | default(none) is none }}" +    when: openshift_master_scheduler_priorities | default(none) is none  # Update scheduler diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 68c71a132..d69472fad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -53,7 +53,7 @@      dest: "{{ openshift.common.config_base}}/master/master-config.yaml"      yaml_key: 
'admissionConfig.pluginConfig'      yaml_value: "{{ openshift.master.admission_plugin_config }}" -  when: "{{ 'admission_plugin_config' in openshift.master }}" +  when: "'admission_plugin_config' in openshift.master"  - modify_yaml:      dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..be18c1edd --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,107 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" +      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos and initialize facts on all hosts +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: 
../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml +  vars: +    node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..20dffb44b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" +      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on control plane hosts +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    
openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..14aaf70d6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,106 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" +      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on nodes +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  roles: +  - openshift_repos +  tags: +  - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    
openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- name: Verify masters are already upgraded +  hosts: oo_masters_to_config +  tags: +  - pre_upgrade +  tasks: +  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." +    when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_nodes_to_upgrade +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml +  vars: +    node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index 43c2ffcd4..ed89dbe8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -3,7 +3,7 @@      dest: "{{ openshift.common.config_base}}/master/master-config.yaml"      yaml_key: 'admissionConfig.pluginConfig'      yaml_value: "{{ openshift.master.admission_plugin_config }}" -  when: "{{ 'admission_plugin_config' in openshift.master }}" +  when: "'admission_plugin_config' in openshift.master"  - modify_yaml:      dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..5d6455bef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,105 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos and initialize facts on all hosts +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: 
../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..c76920586 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on control plane hosts +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    
openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..f397f6015 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on nodes +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  roles: +  - openshift_repos +  tags: +  - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    
openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- name: Verify masters are already upgraded +  hosts: oo_masters_to_config +  tags: +  - pre_upgrade +  tasks: +  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." +    when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_nodes_to_upgrade +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..7cedfb1ca --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" +      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos and initialize facts on all hosts +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  
- pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +#       So it is necessary to run the play after running disable_excluder.yml. +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: validator.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml new file mode 100644 index 000000000..0198074ed --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -0,0 +1,115 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" +      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on control plane hosts +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                    
                                | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: validator.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml new file mode 100644 index 000000000..2b16875f4 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" +      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on nodes +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  roles: +  - openshift_repos +  tags: +  - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                   
             | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- name: Verify masters are already upgraded +  hosts: oo_masters_to_config +  tags: +  - pre_upgrade +  tasks: +  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." +    when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images +  hosts: oo_nodes_to_upgrade +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..4604bdc8b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: '3.6' +      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos and initialize facts on all hosts +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is 
defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_excluder.yml
+  tags:
+  - pre_upgrade
+
+# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+#       So it is necessary to run the play after running disable_excluder.yml.
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: validator.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible. 
+- name: Cleanup unused Docker images +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml + +- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..a09097ed9 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,115 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml +  tags: +  - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_upgrade_target: '3.6' +      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml +  tags: +  - pre_upgrade + +- name: Update repos on control plane hosts +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config +  tags: +  - pre_upgrade +  roles: +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_upgrade +  tags: +  - pre_upgrade +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | 
union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml +  tags: +  - pre_upgrade + +- include: ../disable_excluder.yml +  tags: +  - pre_upgrade + +- include: ../../initialize_openshift_version.yml +  tags: +  - pre_upgrade +  vars: +    # Request specific openshift_release and let the openshift_version role handle converting this +    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if +    # defined, and overriding the normal behavior of protecting the installed version +    openshift_release: "{{ openshift_upgrade_target }}" +    openshift_protect_installed_version: False + +    # We skip the docker role at this point in upgrade to prevent +    # unintended package, container, or config upgrades which trigger +    # docker restarts. At this early stage of upgrade we can assume +    # docker is configured and running. +    skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml +  tags: +  - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml +  tags: +  - pre_upgrade + +- include: validator.yml +  tags: +  - pre_upgrade + +- include: ../pre/gate_checks.yml +  tags: +  - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..7640f2116
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.6'
+      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on nodes
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+  tags:
+  - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_excluder.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images +  hosts: oo_nodes_to_upgrade +  tasks: +  - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 92f16dc47..ab0045a39 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -51,7 +51,7 @@      changed_when: false  - name: Configure docker hosts -  hosts: oo_masters_to-config:oo_nodes_to_config +  hosts: oo_masters_to_config:oo_nodes_to_config    vars:      docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"      docker_insecure_registries: "{{ lookup('oo_option',  'docker_insecure_registries') | oo_split }}" diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index be050c12c..0014a5dbd 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,6 +1,6 @@  ---  - name: Install and configure NetworkManager -  hosts: l_oo_all_hosts +  hosts: oo_all_hosts    become: yes    tasks:    - name: install NetworkManager  | 
