diff options
Diffstat (limited to 'playbooks')
52 files changed, 535 insertions, 759 deletions
| diff --git a/playbooks/README.md b/playbooks/README.md new file mode 100644 index 000000000..5857a9f59 --- /dev/null +++ b/playbooks/README.md @@ -0,0 +1,19 @@ +# openshift-ansible playbooks + +In summary: + +- [`byo`](byo) (_Bring Your Own_ hosts) has the most actively maintained +  playbooks for installing, upgrading and performing others tasks on OpenShift +  clusters. +- [`common`](common) has a set of playbooks that are included by playbooks in +  `byo` and others. + +And: + +- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community +  supported and not officially maintained. +- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack) +  are related to the [`bin/cluster`](../bin) tool and its usage is deprecated. + +Refer to the `README.md` file in each playbook directory for more information +about them. diff --git a/playbooks/adhoc/README.md b/playbooks/adhoc/README.md new file mode 100644 index 000000000..69b9d3135 --- /dev/null +++ b/playbooks/adhoc/README.md @@ -0,0 +1,5 @@ +# _Ad hoc_ playbooks + +This directory holds playbooks and tasks that really don't have a better home. +Existing playbooks living here are community supported and not officially +maintained. diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py index c19274e06..daff68fbe 100644 --- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py @@ -5,22 +5,11 @@  Custom filters for use in openshift-ansible  ''' -import pdb -  class FilterModule(object):      ''' Custom ansible filters '''      @staticmethod -    def oo_pdb(arg): -        ''' This pops you into a pdb instance where arg is the data passed in -            from the filter. 
-            Ex: "{{ hostvars | oo_pdb }}" -        ''' -        pdb.set_trace() -        return arg - -    @staticmethod      def translate_volume_name(volumes, target_volume):          '''              This filter matches a device string /dev/sdX to /dev/xvdX diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml deleted file mode 100644 index 318396bcc..000000000 --- a/playbooks/adhoc/noc/create_host.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: 'Create a host object in zabbix' -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  roles: -    - os_zabbix -  post_tasks: - -    - zbxapi: -        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php -        zbx_class: Template -        state: list -        params: -          host: ctr_test_kwoodson -          filter: -            host: -              - ctr_kwoodson_test_tmpl - -      register: tmpl_results - -    - debug: var=tmpl_results - -#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml -- name: 'Create a host object in zabbix' -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  roles: -    - os_zabbix -  post_tasks: - -    - zbxapi: -        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php -        zbx_class: Host -        state: absent -        params: -          host: ctr_test_kwoodson -          interfaces: -            - type: 1 -              main: 1 -              useip: 1 -              ip: 127.0.0.1 -              dns: "" -              port: 10050 -          groups: -            - groupid: 1 -          templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}" -          output: extend -          filter: -            host: -              - ctr_test_kwoodson - -      register: host_results - -    - debug: var=host_results diff --git 
a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml deleted file mode 100644 index b694aea1b..000000000 --- a/playbooks/adhoc/noc/create_maintenance.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml -- name: 'Create a maintenace object in zabbix' -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  roles: -    - os_zabbix -  vars: -    oo_hostids: '' -    oo_groupids: '' -  post_tasks: -    - assert: -        that: oo_desc is defined - -    - zbxapi: -        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php -        zbx_class: Maintenance -        state: present -        params: -          name: "{{ oo_name }}" -          description: "{{ oo_desc }}" -          active_since: "{{ oo_start }}" -          active_till: "{{ oo_stop }}" -          maintenance_type: "0" -          output: extend -          hostids: "{{ oo_hostids.split(',') | default([]) }}" -          #groupids: "{{ oo_groupids.split(',') | default([]) }}" -          timeperiods: -            - start_time: "{{ oo_start }}" -              period: "{{ oo_stop }}" -          selectTimeperiods: extend - -      register: maintenance - -    - debug: var=maintenance diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml deleted file mode 100644 index 32fc7ce68..000000000 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: 'Get current hosts who have triggers that are alerting by trigger description' -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  roles: -    - os_zabbix -  post_tasks: -    - assert: -        that: oo_desc is defined - -    - zbxapi: -        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php -        zbx_class: Trigger -        state: 
list -        params: -          only_true: true -          output: extend -          selectHosts: extend -          searchWildCardsEnabled: 1 -          search: -            description: "{{ oo_desc }}" -      register: problems - -    - debug: var=problems - -    - set_fact: -        problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}" - -    - debug: var=problem_hosts - -    - add_host: -        name: "{{ item }}" -        groups: problem_hosts_group -      with_items: "{{ problem_hosts }}" - -- name: "Run on problem hosts" -  hosts: problem_hosts_group -  gather_facts: no -  tasks: -    - command: "{{ oo_cmd }}" -      when: oo_cmd is defined diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index bdd92a47d..f0cfa7f55 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -75,6 +75,10 @@  - hosts: nodes    become: yes +  vars: +    node_dirs: +    - "/etc/origin" +    - "/var/lib/origin"    tasks:    - name: unmask services      command: systemctl unmask "{{ item }}" @@ -83,59 +87,66 @@      with_items:      - firewalld -  - name: Remove packages -    package: name={{ item }} state=absent -    when: not is_atomic | bool -    with_items: -    - atomic-enterprise -    - atomic-enterprise-node -    - atomic-enterprise-sdn-ovs -    - atomic-openshift -    - atomic-openshift-clients -    - atomic-openshift-node -    - atomic-openshift-sdn-ovs -    - cockpit-bridge -    - cockpit-docker -    - cockpit-shell -    - cockpit-ws -    - kubernetes-client -    - openshift -    - openshift-node -    - openshift-sdn -    - openshift-sdn-ovs -    - openvswitch -    - origin -    - origin-clients -    - origin-node -    - origin-sdn-ovs -    - tuned-profiles-atomic-enterprise-node -    - tuned-profiles-atomic-openshift-node -    - tuned-profiles-openshift-node -    - tuned-profiles-origin-node - -  - name: Remove flannel package -    
package: name=flannel state=absent -    when: openshift_use_flannel | default(false) | bool and not is_atomic | bool - -  - shell: systemctl reset-failed -    changed_when: False - -  - shell: systemctl daemon-reload -    changed_when: False - -  - name: Remove br0 interface -    shell: ovs-vsctl del-br br0 -    changed_when: False -    failed_when: False - -  - name: Remove linux interfaces -    shell: ip link del "{{ item }}" -    changed_when: False -    failed_when: False -    with_items: -    - lbr0 -    - vlinuxbr -    - vovsbr +  - block: +    - block: +      - name: Remove packages +        package: name={{ item }} state=absent +        with_items: +        - atomic-enterprise +        - atomic-enterprise-node +        - atomic-enterprise-sdn-ovs +        - atomic-openshift +        - atomic-openshift-clients +        - atomic-openshift-excluder +        - atomic-openshift-docker-excluder +        - atomic-openshift-node +        - atomic-openshift-sdn-ovs +        - cockpit-bridge +        - cockpit-docker +        - cockpit-shell +        - cockpit-ws +        - kubernetes-client +        - openshift +        - openshift-node +        - openshift-sdn +        - openshift-sdn-ovs +        - openvswitch +        - origin +        - origin-excluder +        - origin-docker-excluder +        - origin-clients +        - origin-node +        - origin-sdn-ovs +        - tuned-profiles-atomic-enterprise-node +        - tuned-profiles-atomic-openshift-node +        - tuned-profiles-openshift-node +        - tuned-profiles-origin-node + +      - name: Remove flannel package +        package: name=flannel state=absent +        when: openshift_use_flannel | default(false) | bool +      when: "{{ not is_atomic | bool }}" + +    - shell: systemctl reset-failed +      changed_when: False + +    - shell: systemctl daemon-reload +      changed_when: False + +    - name: Remove br0 interface +      shell: ovs-vsctl del-br br0 +      changed_when: False +      failed_when: 
False + +    - name: Remove linux interfaces +      shell: ip link del "{{ item }}" +      changed_when: False +      failed_when: False +      with_items: +      - lbr0 +      - vlinuxbr +      - vovsbr +    when: "{{ openshift_remove_all | default(true) | bool }}"    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true      changed_when: False @@ -172,28 +183,57 @@      failed_when: False      with_items: "{{ exited_containers_to_delete.results }}" -  - shell: docker images | egrep {{ item }} | awk '{ print $3 }' -    changed_when: False -    failed_when: False -    register: images_to_delete +  - block: +    - block: +      - shell: docker images | egrep {{ item }} | awk '{ print $3 }' +        changed_when: False +        failed_when: False +        register: images_to_delete +        with_items: +        - registry\.access\..*redhat\.com/openshift3 +        - registry\.access\..*redhat\.com/aep3 +        - registry\.qe\.openshift\.com/.* +        - registry\.access\..*redhat\.com/rhel7/etcd +        - docker.io/openshift + +      - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" +        changed_when: False +        failed_when: False +        with_items: "{{ images_to_delete.results }}" +      when: "{{ openshift_uninstall_images | default(True) | bool }}" + +    - name: remove sdn drop files +      file: +        path: /run/openshift-sdn +        state: absent + +    - name: Remove files owned by RPMs +      file: path={{ item }} state=absent +      with_items: +      - /etc/sysconfig/openshift-node +      - /etc/sysconfig/openvswitch +      - /run/openshift-sdn +    when: "{{ openshift_remove_all | default(True) | bool }}" + +  - find: path={{ item }} file_type=file +    register: files      with_items: -    - registry\.access\..*redhat\.com/openshift3 -    - registry\.access\..*redhat\.com/aep3 -    - registry\.qe\.openshift\.com/.* -    - registry\.access\..*redhat\.com/rhel7/etcd -    - 
docker.io/openshift -    when: openshift_uninstall_images | default(True) | bool - -  - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" -    changed_when: False -    failed_when: False -    with_items: "{{ images_to_delete.results }}" -    when: openshift_uninstall_images | default(True) | bool +    - "{{ node_dirs }}" + +  - find: path={{ item }} file_type=directory +    register: directories +    with_items: +    - "{{ node_dirs }}" -  - name: Remove sdn drop files -    file: -      path: /run/openshift-sdn -      state: absent +  - file: path={{ item.1.path }} state=absent +    with_subelements: +    - "{{ files.results | default([]) }}" +    - files + +  - file: path={{ item.1.path }} state=absent +    with_subelements: +    - "{{ directories.results | default([]) }}" +    - files    - name: Remove remaining files      file: path={{ item }} state=absent @@ -205,13 +245,10 @@      - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh      - /etc/openshift      - /etc/openshift-sdn -    - /etc/origin      - /etc/sysconfig/atomic-enterprise-node      - /etc/sysconfig/atomic-openshift-node      - /etc/sysconfig/atomic-openshift-node-dep -    - /etc/sysconfig/openshift-node      - /etc/sysconfig/openshift-node-dep -    - /etc/sysconfig/openvswitch      - /etc/sysconfig/origin-node      - /etc/sysconfig/origin-node      - /etc/sysconfig/origin-node-dep @@ -223,10 +260,8 @@      - /etc/systemd/system/origin-node-dep.service      - /etc/systemd/system/origin-node.service      - /etc/systemd/system/origin-node.service.wants -    - /run/openshift-sdn      - /var/lib/atomic-enterprise      - /var/lib/openshift -    - /var/lib/origin    - name: restart docker      service: name=docker state=restarted @@ -234,9 +269,12 @@    - name: restart NetworkManager      service: name=NetworkManager state=restarted -  - hosts: masters    become: yes +  vars: +    master_dirs: +    - "/etc/origin" +    - "/var/lib/origin"    tasks:    - name: unmask services      command: 
systemctl unmask "{{ item }}" @@ -248,12 +286,14 @@    - name: Remove packages      package: name={{ item }} state=absent -    when: not is_atomic | bool +    when: not is_atomic | bool and openshift_remove_all | default(True) | bool      with_items:      - atomic-enterprise      - atomic-enterprise-master      - atomic-openshift      - atomic-openshift-clients +    - atomic-openshift-excluder +    - atomic-openshift-docker-excluder      - atomic-openshift-master      - cockpit-bridge      - cockpit-docker @@ -265,6 +305,8 @@      - openshift-master      - origin      - origin-clients +    - origin-excluder +    - origin-docker-excluder      - origin-master      - pacemaker      - pcs @@ -275,6 +317,33 @@    - shell: systemctl daemon-reload      changed_when: False +  - name: Remove files owned by RPMs +    file: path={{ item }} state=absent +    when: openshift_remove_all | default(True) | bool +    with_items: +    - /etc/sysconfig/atomic-openshift-master +    - /etc/sysconfig/openvswitch + +  - find: path={{ item }} file_type=file +    register: files +    with_items: +    - "{{ master_dirs }}" + +  - find: path={{ item }} file_type=directory +    register: directories +    with_items: +    - "{{ master_dirs }}" + +  - file: path={{ item.1.path }} state=absent +    with_subelements: +    - "{{ files.results | default([]) }}" +    - files + +  - file: path={{ item.1.path }} state=absent +    with_subelements: +    - "{{ directories.results | default([]) }}" +    - files +    - name: Remove remaining files      file: path={{ item }} state=absent      with_items: @@ -284,7 +353,6 @@      - /etc/corosync      - /etc/openshift      - /etc/openshift-sdn -    - /etc/origin      - /etc/systemd/system/atomic-openshift-master.service      - /etc/systemd/system/atomic-openshift-master-api.service      - /etc/systemd/system/atomic-openshift-master-controllers.service @@ -295,14 +363,12 @@      - /etc/sysconfig/atomic-enterprise-master      - 
/etc/sysconfig/atomic-enterprise-master-api      - /etc/sysconfig/atomic-enterprise-master-controllers -    - /etc/sysconfig/atomic-openshift-master      - /etc/sysconfig/atomic-openshift-master-api      - /etc/sysconfig/atomic-openshift-master-controllers      - /etc/sysconfig/origin-master      - /etc/sysconfig/origin-master-api      - /etc/sysconfig/origin-master-controllers      - /etc/sysconfig/openshift-master -    - /etc/sysconfig/openvswitch      - /etc/sysconfig/origin-master      - /etc/sysconfig/origin-master-api      - /etc/sysconfig/origin-master-controllers @@ -310,7 +376,6 @@      - /usr/share/openshift/examples      - /var/lib/atomic-enterprise      - /var/lib/openshift -    - /var/lib/origin      - /var/lib/pacemaker      - /var/lib/pcsd      - /usr/lib/systemd/system/atomic-openshift-master-api.service @@ -331,6 +396,10 @@  - hosts: etcd    become: yes +  vars: +    etcd_dirs: +    - "/etc/etcd" +    - "/var/lib/etcd"    tasks:    - name: unmask services      command: systemctl unmask "{{ item }}" @@ -350,7 +419,7 @@    - name: Remove packages      package: name={{ item }} state=absent -    when: not is_atomic | bool +    when: not is_atomic | bool and openshift_remove_all | default(True) | bool      with_items:      - etcd      - etcd3 @@ -361,13 +430,25 @@    - shell: systemctl daemon-reload      changed_when: False -  - name: Remove remaining files -    file: path={{ item }} state=absent +  - find: path={{ item }} file_type=file +    register: files      with_items: -    - /etc/ansible/facts.d/openshift.fact -    - /etc/etcd -    - /etc/systemd/system/etcd_container.service -    - /etc/profile.d/etcdctl.sh +    - "{{ etcd_dirs }}" + +  - find: path={{ item }} file_type=directory +    register: directories +    with_items: +    - "{{ etcd_dirs }}" + +  - file: path={{ item.1.path }} state=absent +    with_subelements: +    - "{{ files.results | default([]) }}" +    - files + +  - file: path={{ item.1.path }} state=absent +    with_subelements: + 
   - "{{ directories.results | default([]) }}" +    - files    # Intenationally using rm command over file module because if someone had mounted a filesystem    # at /var/lib/etcd then the contents was not removed correctly @@ -377,6 +458,13 @@        warn: no      failed_when: false +  - name: Remove remaining files +    file: path={{ item }} state=absent +    with_items: +    - /etc/ansible/facts.d/openshift.fact +    - /etc/systemd/system/etcd_container.service +    - /etc/profile.d/etcdctl.sh +  - hosts: lb    become: yes    tasks: @@ -389,7 +477,7 @@    - name: Remove packages      package: name={{ item }} state=absent -    when: not is_atomic | bool +    when: not is_atomic | bool and openshift_remove_all | default(True) | bool      with_items:      - haproxy @@ -403,4 +491,4 @@      file: path={{ item }} state=absent      with_items:      - /etc/ansible/facts.d/openshift.fact -    - /var/lib/haproxy +    - /var/lib/haproxy/stats diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml deleted file mode 100644 index 955f990b7..000000000 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- hosts: localhost -  gather_facts: no -  connection: local -  become: no -  vars: -    g_server: http://localhost:8080/zabbix/api_jsonrpc.php -    g_user: '' -    g_password: '' - -  roles: -  - lib_zabbix - -  post_tasks: -  - name: CLEAN List template for heartbeat -    zbx_template: -      zbx_server: "{{ g_server }}" -      zbx_user: "{{ g_user }}" -      zbx_password: "{{ g_password }}" -      state: list -      name: 'Template Heartbeat' -    register: templ_heartbeat - -  - name: CLEAN List template app zabbix server -    zbx_template: -      zbx_server: "{{ g_server }}" -      zbx_user: "{{ g_user }}" -      zbx_password: "{{ g_password }}" -      state: list -      name: 'Template App Zabbix Server' -    register: templ_zabbix_server - -  - name: CLEAN List template app zabbix 
server -    zbx_template: -      zbx_server: "{{ g_server }}" -      zbx_user: "{{ g_user }}" -      zbx_password: "{{ g_password }}" -      state: list -      name: 'Template App Zabbix Agent' -    register: templ_zabbix_agent - -  - name: CLEAN List all templates -    zbx_template: -      zbx_server: "{{ g_server }}" -      zbx_user: "{{ g_user }}" -      zbx_password: "{{ g_password }}" -      state: list -    register: templates - -  - debug: var=templ_heartbeat.results - -  - name: Remove templates if heartbeat template is missing -    zbx_template: -      zbx_server: "{{ g_server }}" -      zbx_user: "{{ g_user }}" -      zbx_password: "{{ g_password }}" -      name: "{{ item }}" -      state: absent -    with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" -    when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins deleted file mode 120000 index b0b7a3414..000000000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml deleted file mode 100755 index 0fe65b338..000000000 --- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env ansible-playbook ---- -- include: clean_zabbix.yml -  vars: -    g_server: http://localhost/zabbix/api_jsonrpc.php -    g_user: Admin -    g_password: zabbix diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml deleted file mode 100755 index 0d5e01878..000000000 --- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/ansible-playbook ---- -- hosts: localhost -  gather_facts: no -  connection: local -  become: no -  vars: -    g_server: http://localhost/zabbix/api_jsonrpc.php -    g_user: Admin -    g_password: zabbix -    g_zbx_scriptrunner_user: scriptrunner -    g_zbx_scriptrunner_bastion_host: specialhost.example.com -  roles: -  - role: os_zabbix -    ozb_server: "{{ g_server }}" -    ozb_user: "{{ g_user }}" -    ozb_password: "{{ g_password }}" -    ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}" -    ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}" diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/adhoc/zabbix_setup/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md new file mode 100644 index 000000000..99698b4d0 --- /dev/null +++ b/playbooks/aws/README.md @@ -0,0 +1,4 @@ +# AWS playbooks + +This playbook directory is meant to be driven by [`bin/cluster`](../../bin), +which is community supported and most use is considered deprecated. diff --git a/playbooks/byo/README.md b/playbooks/byo/README.md new file mode 100644 index 000000000..460fd7cf6 --- /dev/null +++ b/playbooks/byo/README.md @@ -0,0 +1,11 @@ +# Bring Your Own hosts playbooks + +This directory has the most actively used, maintained and supported set of +playbooks for installing, upgrading and performing others tasks on OpenShift +clusters. + +Usage is documented in the official OpenShift documentation pages, under the +Advanced Installation topic: + +- [OpenShift Origin: Advanced Installation](https://docs.openshift.org/latest/install_config/install/advanced_install.html) +- [OpenShift Container Platform: Advanced Installation](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html) diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml new file mode 100644 index 000000000..09ab91bbd --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -0,0 +1,35 @@ +--- +# +# This playbook is a preview of upcoming changes for installing +# Hosted logging on.  See inventory/byo/hosts.*.example for the +# currently supported method. 
+# +- include: ../../common/openshift-cluster/verify_ansible_version.yml + +- name: Create initial host groups for localhost +  hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tags: +  - always +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: "{{ g_all_hosts | default([]) }}" + +- name: Create initial host groups for all hosts +  hosts: l_oo_all_hosts +  gather_facts: no +  tags: +  - always +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml + +- include: ../../common/openshift-cluster/openshift_logging.yml +  vars: +    openshift_cluster_id: "{{ cluster_id | default('default') }}" +    openshift_debug_level: "{{ debug_level | default(2) }}" +    openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 0d451cf77..dc0bf73a2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -18,20 +18,20 @@  # If a node fails, halt everything, the admin will need to clean up and we  # don't want to carry on, potentially taking out every node. The playbook can safely be re-run  # and will not take any action on a node already running the requested docker version. 
-- name: Evacuate and upgrade nodes +- name: Drain and upgrade nodes    hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    serial: 1    any_errors_fatal: true    tasks: -  - name: Prepare for Node evacuation +  - name: Prepare for Node draining      command: >        {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false      delegate_to: "{{ groups.oo_first_master.0 }}"      when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade -  - name: Evacuate Node for Kubelet upgrade +  - name: Drain Node for Kubelet upgrade      command: > -      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} {{ openshift.common.evacuate_or_drain }} --force      delegate_to: "{{ groups.oo_first_master.0 }}"      when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 561be7859..d337b6f75 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -1,7 +1,5 @@  ---  # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. -# -# Currently only supports upgrading 1.9.x to >= 1.10.x.  
- hosts: localhost    connection: local    become: no diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 4ce815271..84a5a026f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -66,6 +66,10 @@    tags:    - pre_upgrade +- include: ../../../../common/openshift-master/validate_restart.yml +  tags: +  - pre_upgrade +  - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml    tags:    - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index d6af71827..7717c95e4 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -71,6 +71,10 @@    tags:    - pre_upgrade +- include: ../../../../common/openshift-master/validate_restart.yml +  tags: +  - pre_upgrade +  - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml    tags:    - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 496b00697..6b69348b7 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -6,8 +6,8 @@    tags:    - pre_upgrade -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts +- name: Configure the upgrade target for the common upgrade tasks +  hosts: l_oo_all_hosts    tags:    - pre_upgrade    tasks: @@ -66,6 +66,10 @@    tags:    - pre_upgrade +- include: ../../../../common/openshift-master/validate_restart.yml +  tags: +  - pre_upgrade +  - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml    tags: 
   - pre_upgrade @@ -89,6 +93,8 @@    - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml  - include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_4/master_config_upgrade.yml"  - include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index 8cde2ac88..92d7c943a 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -71,6 +71,10 @@    tags:    - pre_upgrade +- include: ../../../../common/openshift-master/validate_restart.yml +  tags: +  - pre_upgrade +  - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml    tags:    - pre_upgrade @@ -94,5 +98,7 @@    - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml  - include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_4/master_config_upgrade.yml"  - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 0a163526a..b60807a71 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -15,4 +15,16 @@    tasks:    - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml -- include: ../../common/openshift-master/restart.yml +- include: ../../common/openshift-cluster/evaluate_groups.yml +- include: ../../common/openshift-master/validate_restart.yml + +- name: Restart masters +  hosts: oo_masters_to_config +  vars: +    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" +  serial: 1 +  tasks: +  - include: restart_hosts.yml +    
when: openshift.common.rolling_restart_mode == 'system' +  - include: restart_services.yml +    when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md new file mode 100644 index 000000000..b50292eac --- /dev/null +++ b/playbooks/byo/openshift-preflight/README.md @@ -0,0 +1,43 @@ +# OpenShift preflight checks + +Here we provide an Ansible playbook for detecting potential roadblocks prior to +an install or upgrade. + +Ansible's default operation mode is to fail fast, on the first error. However, +when performing checks, it is useful to gather as much information about +problems as possible in a single run. + +The `check.yml` playbook runs a battery of checks against the inventory hosts +and tells Ansible to ignore intermediate errors, thus giving a more complete +diagnostic of the state of each host. Still, if any check failed, the playbook +run will be marked as having failed. + +To facilitate understanding the problems that were encountered, we provide a +custom callback plugin to summarize execution errors at the end of a playbook +run. + +--- + +*Note that currently the `check.yml` playbook is only useful for RPM-based +installations. Containerized installs are excluded from checks for now, but +might be included in the future if there is demand for that.* + +--- + +## Running + +With an installation of Ansible 2.2 or greater, run the playbook directly +against your inventory file. Here is the step-by-step: + +1. If you haven't done it yet, clone this repository: + +    ```console +    $ git clone https://github.com/openshift/openshift-ansible +    $ cd openshift-ansible +    ``` + +2. 
Run the playbook: + +    ```console +    $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml +    ``` diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml new file mode 100644 index 000000000..32673d01d --- /dev/null +++ b/playbooks/byo/openshift-preflight/check.yml @@ -0,0 +1,31 @@ +--- +- hosts: OSEv3 +  roles: +    - openshift_preflight/init + +- hosts: OSEv3 +  name: checks that apply to all hosts +  gather_facts: no +  ignore_errors: yes +  roles: +    - openshift_preflight/common + +- hosts: masters +  name: checks that apply to masters +  gather_facts: no +  ignore_errors: yes +  roles: +    - openshift_preflight/masters + +- hosts: nodes +  name: checks that apply to nodes +  gather_facts: no +  ignore_errors: yes +  roles: +    - openshift_preflight/nodes + +- hosts: OSEv3 +  name: verify check results +  gather_facts: no +  roles: +    - openshift_preflight/verify_status diff --git a/playbooks/common/README.md b/playbooks/common/README.md new file mode 100644 index 000000000..0b5e26989 --- /dev/null +++ b/playbooks/common/README.md @@ -0,0 +1,9 @@ +# Common playbooks + +This directory has a generic set of playbooks that are included by playbooks in +[`byo`](../byo), as well as other playbooks related to the +[`bin/cluster`](../../bin) tool. + +Note: playbooks in this directory use generic group names that do not line up +with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks, +requiring an explicit remapping of groups. 
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 0f226f5f9..a95cb68b7 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -38,6 +38,9 @@    - set_fact:        openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"      when: openshift_docker_log_options is not defined +  - set_fact: +      openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" +    when: openshift_docker_selinux_enabled is not defined  - include: ../openshift-etcd/config.yml    tags: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index cd2f2e6aa..ec5b18389 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -26,29 +26,8 @@        logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"        logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"    roles: -  - role: openshift_cli -  - role: openshift_hosted_facts -  - role: openshift_projects -    # TODO: Move standard project definitions to openshift_hosted/vars/main.yml -    # Vars are not accessible in meta/main.yml in ansible-1.9.x -    openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}" -  - role: openshift_serviceaccounts -    openshift_serviceaccounts_names: -    - router -    openshift_serviceaccounts_namespace: default -    openshift_serviceaccounts_sccs: -    - hostnetwork -    when: openshift.common.version_gte_3_2_or_1_2 -  - role: openshift_serviceaccounts -    openshift_serviceaccounts_names: -    - router -    - 
registry -    openshift_serviceaccounts_namespace: default -    openshift_serviceaccounts_sccs: -    - privileged -    when: not openshift.common.version_gte_3_2_or_1_2    - role: openshift_hosted -  - role: openshift_metrics +  - role: openshift_hosted_metrics      when: openshift_hosted_metrics_deploy | default(false) | bool    - role: openshift_hosted_logging      when: openshift_hosted_logging_deploy | default(false) | bool diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml new file mode 100644 index 000000000..6347cbc26 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -0,0 +1,5 @@ +--- +- name: OpenShift Aggregated Logging +  hosts: oo_first_master +  roles: +  - openshift_logging diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml new file mode 100644 index 000000000..9f38ceea6 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -0,0 +1,5 @@ +--- +- name: OpenShift Metrics +  hosts: oo_first_master +  roles: +  - openshift_metrics diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml index 5f008a045..2383836d4 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml @@ -108,10 +108,6 @@                                       | oo_select_keys(groups['oo_etcd_to_config'] | default([]))                                       | oo_collect('openshift.common.hostname')                                       | default(none, true) }}" -    openshift_master_hostnames: "{{ hostvars -                                    | oo_select_keys(groups['oo_masters_to_config'] | default([])) -                                    | oo_collect('openshift.common.all_hostnames') -                                    
| oo_flatten | unique }}"      openshift_certificates_redeploy: true    - role: openshift_etcd_client_certificates      etcd_certificates_redeploy: true @@ -204,7 +200,7 @@        cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig      changed_when: False -- name: Serially evacuate all nodes to trigger redeployments +- name: Serially drain all nodes to trigger redeployments    hosts: oo_nodes_to_config    serial: 1    any_errors_fatal: true @@ -222,7 +218,7 @@        was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"      when: openshift_certificates_redeploy_ca | default(false) | bool -  - name: Prepare for node evacuation +  - name: Prepare for node draining      command: >        {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig        manage-node {{ openshift.node.nodename }} @@ -230,11 +226,11 @@      delegate_to: "{{ groups.oo_first_master.0 }}"      when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool -  - name: Evacuate node +  - name: Drain node      command: >        {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig        manage-node {{ openshift.node.nodename }} -      --evacuate --force +      {{ openshift.common.evacuate_or_drain }} --force      delegate_to: "{{ groups.oo_first_master.0 }}"      when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml index d800b289b..1b418920f 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml @@ -19,11 +19,9 @@    when: openshift.common.is_containerized | bool  - name: Wait for master API 
to come back online -  become: no -  local_action: -    module: wait_for -      host="{{ inventory_hostname }}" -      state=started -      delay=10 -      port="{{ openshift.master.api_port }}" +  wait_for: +    host: "{{ openshift.common.hostname }}" +    state: started +    delay: 10 +    port: "{{ openshift.master.api_port }}"    when: inventory_hostname in groups.oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml index 44ddf97ad..17f8fc6e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml @@ -20,7 +20,7 @@  - debug: var=docker_image_count.stdout  - name: Remove all containers and images -  script: nuke_images.sh docker +  script: nuke_images.sh    register: nuke_images_result    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index e3379f29b..b2a2eac9a 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -30,9 +30,9 @@    changed_when: false  - fail: -    msg: This playbook requires access to Docker 1.10 or later -  # Disable the 1.10 requirement if the user set a specific Docker version -  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<'))) +    msg: This playbook requires access to Docker 1.12 or later +  # Disable the 1.12 requirement if the user set a specific Docker version +  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and 
(pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))  # Default l_docker_upgrade to False, we'll set to True if an upgrade is required:  - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 0a972adf6..d0eadf1fc 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -4,6 +4,7 @@    vars:      embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"      timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" +    etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' }}"    roles:    - openshift_facts    tasks: @@ -42,19 +43,32 @@          {{ avail_disk.stdout }} Kb available.      when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) -  # TODO - Refactor containerized backup to use etcd_container to backup the data so we don't rely on -  # the host's etcdctl binary which may be of a different version. - -  # for non containerized and non embedded we should have the correct version of etcd installed already -  # For embedded we need to use the latest because OCP 3.3 uses a version of etcd that can only be backed -  # up with etcd-3.x +  # For non containerized and non embedded we should have the correct version of +  # etcd installed already. So don't do anything. +  # +  # For embedded or containerized we need to use the latest because OCP 3.3 uses +  # a version of etcd that can only be backed up with etcd-3.x and if it's +  # containerized then etcd version may be newer than that on the host so +  # upgrade it. +  # +  # On atomic we have neither yum nor dnf so ansible throws a hard to debug error +  # if you use package there, like this: "Could not find a module for unknown." 
+  # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668 +  # +  # TODO - We should refactor all containerized backups to use the containerized +  # version of etcd to perform the backup rather than relying on the host's +  # binaries. Until we do that we'll continue to have problems backing up etcd +  # when atomic host has an older version than the version that's running in the +  # container whether that's embedded or not    - name: Install latest etcd for containerized or embedded -    package: name=etcd state=latest -    when: ( openshift.common.is_containerized and not openshift.common.is_atomic ) or embedded_etcd | bool +    package: +      name: etcd +      state: latest +    when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic    - name: Generate etcd backup      command: > -      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} +      {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}        --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}    - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml index f88981a0b..5f8b59e17 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml @@ -8,8 +8,7 @@  - name: Set new_etcd_image    set_fact: -    new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=') -                        else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}" +    new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"  - name: Pull new etcd image    command: "docker pull {{ new_etcd_image }}" diff --git 
a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index 5ff9521ec..0f8d94737 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -75,7 +75,7 @@    hosts: etcd_hosts_to_upgrade    serial: 1    vars: -    upgrade_version: 3.0.14 +    upgrade_version: 3.0.15    tasks:    - include: containerized_tasks.yml      when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check deleted file mode 100644 index e5c958ebb..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python -""" -Pre-upgrade checks that must be run on a master before proceeding with upgrade. -""" -# This is a script not a python module: -# pylint: disable=invalid-name - -# NOTE: This script should not require any python libs other than what is -# in the standard library. - -__license__ = "ASL 2.0" - -import json -import os -import subprocess -import re - -# The maximum length of container.ports.name -ALLOWED_LENGTH = 15 -# The valid structure of container.ports.name -ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$') -AT_LEAST_ONE_LETTER = re.compile('[a-z]') -# look at OS_PATH for the full path. 
Default ot 'oc' -OC_PATH = os.getenv('OC_PATH', 'oc') - - -def validate(value): -    """ -    validate verifies that value matches required conventions - -    Rules of container.ports.name validation: - -    * must be less that 16 chars -    * at least one letter -    * only a-z0-9- -    * hyphens can not be leading or trailing or next to each other - -    :Parameters: -       - `value`: Value to validate -    """ -    if len(value) > ALLOWED_LENGTH: -        return False - -    if '--' in value: -        return False - -    # We search since it can be anywhere -    if not AT_LEAST_ONE_LETTER.search(value): -        return False - -    # We match because it must start at the beginning -    if not ALLOWED_CHARS.match(value): -        return False -    return True - - -def list_items(kind): -    """ -    list_items returns a list of items from the api - -    :Parameters: -       - `kind`: Kind of item to access -    """ -    response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind]) -    items = json.loads(response) -    return items.get("items", []) - - -def get(obj, *paths): -    """ -    Gets an object - -    :Parameters: -       - `obj`: A dictionary structure -       - `path`: All other non-keyword arguments -    """ -    ret_obj = obj -    for path in paths: -        if ret_obj.get(path, None) is None: -            return [] -        ret_obj = ret_obj[path] -    return ret_obj - - -# pylint: disable=too-many-arguments -def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid): -    """ -    Prints out results in human friendly way. - -    :Parameters: -       - `namespace`: Namespace of the resource -       - `kind`: Kind of the resource -       - `item_name`: Name of the resource -       - `container_name`: Name of the container. May be "" when kind=Service. -       - `port_name`: Name of the port -       - `invalid_label`: The label of the invalid port. 
Port.name/targetPort -       - `valid`: True if the port is valid -    """ -    if not valid: -        if len(container_name) > 0: -            print('%s/%s -n %s (Container="%s" %s="%s")' % ( -                kind, item_name, namespace, container_name, invalid_label, port_name)) -        else: -            print('%s/%s -n %s (%s="%s")' % ( -                kind, item_name, namespace, invalid_label, port_name)) - - -def print_validation_header(): -    """ -    Prints the error header. Should run on the first error to avoid -    overwhelming the user. -    """ -    print """\ -At least one port name is invalid and must be corrected before upgrading. -Please update or remove any resources with invalid port names. - -  Valid port names must: - -    * be less that 16 characters -    * have at least one letter -    * contain only a-z0-9- -    * not start or end with - -    * not contain dashes next to each other ('--') -""" - - -def main(): -    """ -    main is the main entry point to this script -    """ -    try: -        # the comma at the end suppresses the newline -        print "Checking for oc ...", -        subprocess.check_output([OC_PATH, 'whoami']) -        print "found" -    except: -        print( -            'Unable to run "%s whoami"\n' -            'Please ensure OpenShift is running, and "oc" is on your system ' -            'path.\n' -            'You can override the path with the OC_PATH environment variable.' 
-            % OC_PATH) -        raise SystemExit(1) - -    # Where the magic happens -    first_error = True -    for kind, path in [ -            ('deploymentconfigs', ("spec", "template", "spec", "containers")), -            ('replicationcontrollers', ("spec", "template", "spec", "containers")), -            ('pods', ("spec", "containers"))]: -        for item in list_items(kind): -            namespace = item["metadata"]["namespace"] -            item_name = item["metadata"]["name"] -            for container in get(item, *path): -                container_name = container["name"] -                for port in get(container, "ports"): -                    port_name = port.get("name", None) -                    if not port_name: -                        # Unnamed ports are OK -                        continue -                    valid = validate(port_name) -                    if not valid and first_error: -                        first_error = False -                        print_validation_header() -                    pretty_print_errors( -                        namespace, kind, item_name, -                        container_name, "Port.name", port_name, valid) - -    # Services follow a different flow -    for item in list_items('services'): -        namespace = item["metadata"]["namespace"] -        item_name = item["metadata"]["name"] -        for port in get(item, "spec", "ports"): -            port_name = port.get("targetPort", None) -            if isinstance(port_name, int) or port_name is None: -                # Integer only or unnamed ports are OK -                continue -            valid = validate(port_name) -            if not valid and first_error: -                first_error = False -                print_validation_header() -            pretty_print_errors( -                namespace, "services", item_name, "", -                "targetPort", port_name, valid) - -    # If we had at least 1 error then exit with 1 -    if not first_error: -   
     raise SystemExit(1) - - -if __name__ == '__main__': -    main() - diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh deleted file mode 100644 index 7bf249742..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -if [ `which dnf 2> /dev/null` ]; then -  installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) -  available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) -else -  installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null) -  available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null) -fi - -echo "---" -echo "curr_version: ${installed}" -echo "avail_version: ${available}" diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 8cac2fb3b..76645ff3f 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,5 +1,6 @@  --- -- hosts: localhost +- name: Create l_oo_all_hosts group +  hosts: localhost    connection: local    become: no    gather_facts: no @@ -10,7 +11,8 @@        groups: l_oo_all_hosts      with_items: "{{ g_all_hosts | default([]) }}" -- hosts: l_oo_all_hosts +- name: Include g_*_hosts vars for hosts in group l_oo_all_hosts +  hosts: l_oo_all_hosts    gather_facts: no    tasks:    - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml @@ -46,3 +48,14 @@      when: openshift_docker_log_options is not defined  - include: ../initialize_facts.yml + +- name: Ensure clean repo cache in the event repos have been changed manually +  hosts: oo_all_hosts +  tags: +  - pre_upgrade +  tasks: +  - name: Clean package cache +    command: "{{ 
ansible_pkg_mgr }} clean all" +    when: not openshift.common.is_atomic | bool +    args: +      warn: no diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 1238acb05..673f11889 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -146,7 +146,7 @@ def main():      # ignore broad-except error to avoid stack trace to ansible user      # pylint: disable=broad-except -    except Exception, e: +    except Exception as e:          return module.fail_json(msg=str(e)) diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml index ba4d77617..7646e0fa6 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml @@ -19,5 +19,5 @@      when: openshift.common.is_atomic | bool    - fail: -      msg: This playbook requires access to Docker 1.10 or later -    when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<') +      msg: This playbook requires access to Docker 1.12 or later +    when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 9632626a4..c83923dae 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ 
-12,10 +12,6 @@        msg: Verify the correct version was found      when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version -  - name: Clean package cache -    command: "{{ ansible_pkg_mgr }} clean all" -    when: not openshift.common.is_atomic | bool -    - set_fact:        g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"      when: not openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 474e6311e..77b37cdc2 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -30,14 +30,6 @@  - name: Upgrade and backup etcd    include: ./etcd/main.yml -- name: Upgrade master packages -  hosts: oo_masters_to_config -  roles: -  - openshift_facts -  tasks: -  - include: rpm_upgrade.yml component=master -    when: not openshift.common.is_containerized | bool -  # Create service signer cert when missing. Service signer certificate  # is added to master config in the master config hook for v3_3.  - name: Determine if service signer cert must be created @@ -51,15 +43,40 @@  - include: create_service_signer_cert.yml -- name: Upgrade master config and systemd units +# Set openshift_master_facts separately. In order to reconcile +# admission_config's, we currently must run openshift_master_facts and +# then run openshift_facts. +- name: Set OpenShift master facts +  hosts: oo_masters_to_config +  roles: +  - openshift_master_facts + +# The main master upgrade play. Should handle all changes to the system in one pass, with +# support for optional hooks to be defined. 
+- name: Upgrade master    hosts: oo_masters_to_config +  vars: +    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" +  serial: 1    handlers:    - include: ../../../../roles/openshift_master/handlers/main.yml      static: yes    roles:    - openshift_facts -  - openshift_master_facts -  tasks: +  post_tasks: + +  # Run the pre-upgrade hook if defined: +  - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" +    when: openshift_master_upgrade_pre_hook is defined + +  - include: "{{ openshift_master_upgrade_pre_hook }}" +    when: openshift_master_upgrade_pre_hook is defined + +  - include: rpm_upgrade.yml component=master +    when: not openshift.common.is_containerized | bool + +  - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml +    - include: upgrade_scheduler.yml    - include: "{{ master_config_hook }}" @@ -95,9 +112,26 @@        state: link      when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists -- name: Set master update status to complete -  hosts: oo_masters_to_config -  tasks: +  # Run the upgrade hook prior to restarting services/system if defined: +  - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" +    when: openshift_master_upgrade_hook is defined + +  - include: "{{ openshift_master_upgrade_hook }}" +    when: openshift_master_upgrade_hook is defined + +  - include: ../../openshift-master/restart_hosts.yml +    when: openshift.common.rolling_restart_mode == 'system' + +  - include: ../../openshift-master/restart_services.yml +    when: openshift.common.rolling_restart_mode == 'services' + +  # Run the post-upgrade hook if defined: +  - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}" +    when: openshift_master_upgrade_post_hook is defined + +  - include: "{{ openshift_master_upgrade_post_hook }}" +    when: openshift_master_upgrade_post_hook is defined +    - set_fact:        
master_update_complete: True @@ -119,10 +153,6 @@        msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"      when: master_update_failed | length > 0 -# We are now ready to restart master services (or entire system -# depending on openshift_rolling_restart_mode): -- include: ../../openshift-master/restart.yml -  ###############################################################################  # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints  ############################################################################### diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index b3ac34d90..2bb460815 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -1,5 +1,5 @@  --- -- name: Evacuate and upgrade nodes +- name: Drain and upgrade nodes    hosts: oo_nodes_to_upgrade    # This var must be set with -e on invocation, as it is not a per-host inventory var    # and is evaluated early. Values such as "20%" can also be used. 
@@ -39,9 +39,9 @@      retries: 3      delay: 1 -  - name: Evacuate Node for Kubelet upgrade +  - name: Drain Node for Kubelet upgrade      command: > -      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force +      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force      delegate_to: "{{ groups.oo_first_master.0 }}"      when: inventory_hostname in groups.oo_nodes_to_upgrade diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 8058d3377..7a334e771 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -74,22 +74,6 @@          public_console_url: "{{ openshift_master_public_console_url | default(None) }}"          ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"          master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -  - openshift_facts: -      role: hosted -      openshift_env: -        openshift_hosted_registry_storage_kind: 'nfs' -    when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0 - -- name: Create temp directory for syncing certs -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - name: Create local temp directory for syncing certs -    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX -    register: g_master_mktemp -    changed_when: False  - name: Determine if session secrets must be generated    hosts: oo_first_master @@ -122,7 +106,6 @@    hosts: oo_masters_to_config    any_errors_fatal: true    vars: -    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"      openshift_master_ha: "{{ openshift.master.ha }}"      
openshift_master_count: "{{ openshift.master.master_count }}"      openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" @@ -133,57 +116,19 @@                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}"    roles: -  - role: openshift_master_facts -  - role: openshift_hosted_facts -  - role: openshift_master_certificates +  - role: openshift_master      openshift_ca_host: "{{ groups.oo_first_master.0 }}"      openshift_master_etcd_hosts: "{{ hostvars                                       | oo_select_keys(groups['oo_etcd_to_config'] | default([]))                                       | oo_collect('openshift.common.hostname')                                       | default(none, true) }}" -    openshift_master_hostnames: "{{ hostvars -                                    | oo_select_keys(groups['oo_masters_to_config'] | default([])) -                                    | oo_collect('openshift.common.all_hostnames') -                                    | oo_flatten | unique }}" -  - role: openshift_etcd_client_certificates +    openshift_master_hosts: "{{ groups.oo_masters_to_config }}"      etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"      etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"      etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"      etcd_cert_prefix: "master.etcd-" -    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config -  - role: openshift_clock -  - role: openshift_cloud_provider -  - role: openshift_builddefaults -  - role: os_firewall -    os_firewall_allow: -    - service: api server https -      port: "{{ openshift.master.api_port }}/tcp" -    - service: api controllers https -      port: "{{ openshift.master.controllers_port }}/tcp" -    - service: skydns tcp -      port: "{{ openshift.master.dns_port }}/tcp" -    
- service: skydns udp -      port: "{{ openshift.master.dns_port }}/udp" -  - role: os_firewall -    os_firewall_allow: -    - service: etcd embedded -      port: 4001/tcp -    when: groups.oo_etcd_to_config | default([]) | length == 0 -  - role: openshift_master -    openshift_master_hosts: "{{ groups.oo_masters_to_config }}" -  - role: nickhammond.logrotate -  - role: nuage_master -    when: openshift.common.use_nuage | bool +    post_tasks:    - name: Create group for deployment type      group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}      changed_when: False - -- name: Delete temporary directory on localhost -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - file: name={{ g_master_mktemp.stdout }} state=absent -    changed_when: False diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index ffa23d26a..832301e3d 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -7,12 +7,19 @@    ignore_errors: true    become: yes -# Ensure the api_port is available. 
-- name: Wait for master API to come back online -  become: no +- name: Wait for master to restart    local_action:      module: wait_for -      host="{{ openshift.common.hostname }}" +      host="{{ inventory_hostname }}"        state=started        delay=10 -      port="{{ openshift.master.api_port }}" +  become: no + +# Now that ssh is back up we can wait for API on the remote system, +# avoiding some potential connection issues from local system: +- name: Wait for master API to come back online +  wait_for: +    host: "{{ openshift.common.hostname }}" +    state: started +    delay: 10 +    port: "{{ openshift.master.api_port }}" diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 25fa10450..508b5a3ac 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -8,16 +8,14 @@    service:      name: "{{ openshift.common.service_type }}-master-api"      state: restarted -  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' +  when: openshift_master_ha | bool  - name: Wait for master API to come back online -  become: no -  local_action: -    module: wait_for -      host="{{ openshift.common.hostname }}" -      state=started -      delay=10 -      port="{{ openshift.master.api_port }}" -  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' +  wait_for: +    host: "{{ openshift.common.hostname }}" +    state: started +    delay: 10 +    port: "{{ openshift.master.api_port }}" +  when: openshift_master_ha | bool  - name: Restart master controllers    service:      name: "{{ openshift.common.service_type }}-master-controllers" @@ -25,4 +23,4 @@    # Ignore errrors since it is possible that type != simple for    # pre-3.1.1 installations.    
ignore_errors: true -  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker' +  when: openshift_master_ha | bool diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/validate_restart.yml index 7b340887a..5dbb21502 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/validate_restart.yml @@ -1,6 +1,4 @@  --- -- include: ../openshift-cluster/evaluate_groups.yml -  - name: Validate configuration for rolling restart    hosts: oo_masters_to_config    roles: @@ -65,14 +63,3 @@    - set_fact:        current_host: "{{ exists.stat.exists }}"      when: openshift.common.rolling_restart_mode == 'system' - -- name: Restart masters -  hosts: oo_masters_to_config -  vars: -    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" -  serial: 1 -  tasks: -  - include: restart_hosts.yml -    when: openshift.common.rolling_restart_mode == 'system' -  - include: restart_services.yml -    when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index e28da5713..b36c0eedf 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -60,30 +60,8 @@      when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and              openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles: -  - role: openshift_common -  - role: openshift_clock -  - role: openshift_docker -  - role: openshift_node_certificates -    openshift_ca_host: "{{ groups.oo_first_master.0 }}" -  - role: openshift_cloud_provider -  - role: openshift_node_dnsmasq -    when: openshift.common.use_dnsmasq | bool -  - role: os_firewall -    os_firewall_allow: -    - service: Kubernetes kubelet -      port: 10250/tcp -    - service: http -      port: 80/tcp -    - service: https -      port: 443/tcp -    - service: 
Openshift kubelet ReadOnlyPort -      port: 10255/tcp -    - service: Openshift kubelet ReadOnlyPort udp -      port: 10255/udp -    - service: OpenShift OVS sdn -      port: 4789/udp -      when: openshift.node.use_openshift_sdn | bool    - role: openshift_node +    openshift_ca_host: "{{ groups.oo_first_master.0 }}"  - name: Configure nodes    hosts: oo_nodes_to_config:!oo_containerized_master_nodes @@ -99,30 +77,8 @@      when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and              openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles: -  - role: openshift_common -  - role: openshift_clock -  - role: openshift_docker -  - role: openshift_node_certificates -    openshift_ca_host: "{{ groups.oo_first_master.0 }}" -  - role: openshift_cloud_provider -  - role: openshift_node_dnsmasq -    when: openshift.common.use_dnsmasq | bool -  - role: os_firewall -    os_firewall_allow: -    - service: Kubernetes kubelet -      port: 10250/tcp -    - service: http -      port: 80/tcp -    - service: https -      port: 443/tcp -    - service: Openshift kubelet ReadOnlyPort -      port: 10255/tcp -    - service: Openshift kubelet ReadOnlyPort udp -      port: 10255/udp -    - service: OpenShift OVS sdn -      port: 4789/udp -      when: openshift.node.use_openshift_sdn | bool    - role: openshift_node +    openshift_ca_host: "{{ groups.oo_first_master.0 }}"  - name: Additional node config    hosts: oo_nodes_to_config diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md new file mode 100644 index 000000000..0514d6f50 --- /dev/null +++ b/playbooks/gce/README.md @@ -0,0 +1,4 @@ +# GCE playbooks + +This playbook directory is meant to be driven by [`bin/cluster`](../../bin), +which is community supported and most use is considered deprecated. 
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md new file mode 100644 index 000000000..3ce46a76f --- /dev/null +++ b/playbooks/libvirt/README.md @@ -0,0 +1,4 @@ +# libvirt playbooks + +This playbook directory is meant to be driven by [`bin/cluster`](../../bin), +which is community supported and whose use is considered deprecated. diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md new file mode 100644 index 000000000..a6d8d6995 --- /dev/null +++ b/playbooks/openstack/README.md @@ -0,0 +1,4 @@ +# OpenStack playbooks + +This playbook directory is meant to be driven by [`bin/cluster`](../../bin), +which is community supported and whose use is considered deprecated. |
