diff options
Diffstat (limited to 'roles')
70 files changed, 1152 insertions, 329 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 66c3d9cc4..89a84c4df 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -123,7 +123,7 @@    register: asb_client_secret  - set_fact: -    service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt'] +    service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"  # Using oc_obj because oc_service doesn't seem to allow annotations  # TODO: Extend oc_service to allow annotations diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 1c830cb4e..fe938e52b 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -21,3 +21,14 @@ l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is stri  l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"  containers_registries_conf_path: /etc/containers/registries.conf + +r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +r_crio_os_firewall_deny: [] +r_crio_os_firewall_allow: +- service: crio +  port: 10010/tcp + + +openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}" diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index b773a417c..62b8a2eb5 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info:      - 7  
dependencies:  - role: lib_openshift +- role: lib_os_firewall diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml new file mode 100644 index 000000000..fbd1ff515 --- /dev/null +++ b/roles/docker/tasks/crio_firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_crio_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_crio_os_firewall_deny }}" + +- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_crio_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_crio_os_firewall_deny }}" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 5ea73568a..1539af53f 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -35,4 +35,4 @@    include: systemcontainer_crio.yml    when:      - l_use_crio -    - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] +    - openshift_docker_is_node_or_master | bool diff --git 
a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 13bbd359e..67ede0d21 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -3,16 +3,16 @@  # TODO: Much of this file is shared with container engine tasks  - set_fact:      l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" -  when: l2_docker_insecure_registries +  when: l2_docker_insecure_registries | bool  - set_fact:      l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" -  when: l2_docker_additional_registries +  when: l2_docker_additional_registries | bool  - set_fact:      l_crio_registries: "{{ ['docker.io'] }}" -  when: not l2_docker_additional_registries +  when: not (l2_docker_additional_registries | bool)  - set_fact:      l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" -  when: l2_docker_additional_registries +  when: l2_docker_additional_registries | bool  - set_fact:      l_openshift_image_tag: "{{ openshift_image_tag | string }}" @@ -62,7 +62,7 @@    shell: lsmod | grep overlay    register: l_has_overlay_in_kernel    ignore_errors: yes - +  failed_when: false  - when: l_has_overlay_in_kernel.rc != 0    block: @@ -161,11 +161,19 @@      path: /etc/cni/net.d/      state: directory +- name: setup firewall for CRI-O +  include: crio_firewall.yml +  static: yes +  - name: Configure the CNI network    template:      dest: /etc/cni/net.d/openshift-sdn.conf      src: 80-openshift-sdn.conf.j2 +- name: Fix SELinux Permissions on /var/lib/containers +  command: "restorecon -R /var/lib/containers/" +  changed_when: false +  - name: Start the CRI-O service    systemd:      name: "cri-o" diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 726e8ada7..aa3b35ddd 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ 
b/roles/docker/tasks/systemcontainer_docker.yml @@ -173,4 +173,6 @@  - set_fact:      docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}" +- include: registry_auth.yml +  - meta: flush_handlers diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2 index b715c2ffa..93014a80d 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/docker/templates/crio.conf.j2 @@ -108,7 +108,7 @@ pids_limit = 1024  # log_size_max is the max limit for the container log size in bytes.  # Negative values indicate that no limit is imposed. -log_size_max = -1 +log_size_max = 52428800  # The "crio.image" table contains settings pertaining to the  # management of OCI images. diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 78f231416..4b734d4ed 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -13,8 +13,6 @@ r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'h  # etcd server vars  etcd_conf_dir: '/etc/etcd' -r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd -etcd_system_container_conf_dir: /var/lib/etcd/etc  etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"  etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"  etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" @@ -54,7 +52,7 @@ etcd_is_containerized: False  etcd_is_thirdparty: False  # etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" +etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"  # etcd ports and protocols  etcd_client_port: 2379 diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml index 24bd0540d..d2e866416 100644 --- a/roles/etcd/tasks/backup.force_new_cluster.yml +++ 
b/roles/etcd/tasks/backup.force_new_cluster.yml @@ -3,10 +3,10 @@  - name: Move content of etcd backup under the etcd data directory    command: > -    mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}" +    mv "{{ l_etcd_backup_dir }}/member" "{{ etcd_data_dir }}"  - name: Set etcd group for the etcd data directory    command: > -    chown -R etcd:etcd "{{ l_etcd_data_dir }}" +    chown -R etcd:etcd "{{ etcd_data_dir }}"  - include: auxiliary/force_new_cluster.yml diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml index ec1a1989c..ca0d29155 100644 --- a/roles/etcd/tasks/backup/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml @@ -3,7 +3,7 @@  # TODO: replace shell module with command and update later checks  - name: Check available disk space for etcd backup -  shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1 +  shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1    register: l_avail_disk    # AUDIT:changed_when: `false` because we are only inspecting    # state, not manipulating anything @@ -11,7 +11,7 @@  # TODO: replace shell module with command and update later checks  - name: Check current etcd disk usage -  shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1 +  shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1    register: l_etcd_disk_usage    # AUDIT:changed_when: `false` because we are only inspecting    # state, not manipulating anything @@ -44,17 +44,17 @@    - r_etcd_common_embedded_etcd | bool    - not l_ostree_booted.stat.exists | bool -- name: Check selinux label of '{{ l_etcd_data_dir }}' +- name: Check selinux label of '{{ etcd_data_dir }}'    command: > -    stat -c '%C' {{ l_etcd_data_dir }} +    stat -c '%C' {{ etcd_data_dir }}    register: l_etcd_selinux_labels  - debug:      msg: "{{ l_etcd_selinux_labels }}" -- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label +- name: Make sure the '{{ 
etcd_data_dir }}' has the proper label    command: > -    chcon -t svirt_sandbox_file_t  "{{ l_etcd_data_dir }}" +    chcon -t svirt_sandbox_file_t  "{{ etcd_data_dir }}"    when:    - l_etcd_selinux_labels.rc == 0    - "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout" @@ -68,12 +68,12 @@  # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6  - name: Check for v3 data store    stat: -    path: "{{ l_etcd_data_dir }}/member/snap/db" +    path: "{{ etcd_data_dir }}/member/snap/db"    register: l_v3_db  - name: Copy etcd v3 data store    command: > -    cp -a {{ l_etcd_data_dir }}/member/snap/db +    cp -a {{ etcd_data_dir }}/member/snap/db      {{ l_etcd_backup_dir }}/member/snap/    when: l_v3_db.stat.exists diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml index 16604bae8..967e5ee66 100644 --- a/roles/etcd/tasks/backup/copy.yml +++ b/roles/etcd/tasks/backup/copy.yml @@ -2,4 +2,4 @@  - name: Copy etcd backup    copy:      src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz" -    dest: "{{ l_etcd_data_dir }}" +    dest: "{{ etcd_data_dir }}" diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml index 6c75d00a7..a85f533c2 100644 --- a/roles/etcd/tasks/backup/unarchive.yml +++ b/roles/etcd/tasks/backup/unarchive.yml @@ -11,4 +11,4 @@    #   src: "{{ l_etcd_backup_dir }}.tgz"    #   dest: "{{ l_etcd_backup_dir }}"    command: > -    tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}" +    tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}" diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml index 3c009f557..3ffa641b3 100644 --- a/roles/etcd/tasks/backup/vars.yml +++ b/roles/etcd/tasks/backup/vars.yml @@ -6,13 +6,10 @@      l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"  - set_fact: -    l_etcd_data_dir: "{{ etcd_data_dir }}{{ 
'/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}" - -- set_fact:      l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"  - set_fact:      l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"  - set_fact: -    l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}" +    l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}" diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index 26492fb3c..00b8f4a0b 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -12,9 +12,6 @@    - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"    - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"    - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt" -  - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt" -  - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt" -  - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"    register: g_etcd_server_cert_stat_result    when: not etcd_certificates_redeploy | default(false) | bool @@ -141,7 +138,6 @@      state: directory    with_items:    - "{{ etcd_cert_config_dir }}" -  - "{{ etcd_system_container_cert_config_dir }}"    when: etcd_server_certs_missing | bool  - name: Unarchive cert tarball @@ -176,25 +172,8 @@      state: directory    with_items:    - "{{ etcd_ca_dir }}" -  - "{{ etcd_system_container_cert_config_dir }}/ca"    when: etcd_server_certs_missing | bool -- name: Unarchive cert tarball for the system container -  unarchive: -    src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" -    dest: "{{ etcd_system_container_cert_config_dir }}" -  when: -  - etcd_server_certs_missing | bool -  - r_etcd_common_etcd_runtime == 'runc' - -- name: 
Unarchive etcd ca cert tarballs for the system container -  unarchive: -    src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" -    dest: "{{ etcd_system_container_cert_config_dir }}/ca" -  when: -  - etcd_server_certs_missing | bool -  - r_etcd_common_etcd_runtime == 'runc' -  - name: Delete temporary directory    local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent    become: no diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index 9a6951920..f71d9b551 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -1,7 +1,4 @@  --- -- set_fact: -    l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}" -  - name: Ensure proxies are in the atomic.conf    include_role:      name: openshift_atomic @@ -57,36 +54,13 @@  - name: Systemd reload configuration    systemd: name=etcd_container daemon_reload=yes -- name: Check for previous etcd data store -  stat: -    path: "{{ l_etcd_src_data_dir }}/member/" -  register: src_datastore - -- name: Check for etcd system container data store -  stat: -    path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member" -  register: dest_datastore - -- name: Ensure that etcd system container data dirs exist -  file: path="{{ item }}" state=directory -  with_items: -    - "{{ r_etcd_common_system_container_host_dir }}/etc" -    - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd" - -- name: Copy etcd data store -  command: > -    cp -a {{ l_etcd_src_data_dir }}/member -    {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member -  when: -    - src_datastore.stat.exists -    - not dest_datastore.stat.exists -  - name: Install or Update Etcd system container package    oc_atomic_container:      name: etcd      image: "{{ openshift.etcd.etcd_image }}"      state: latest      values: +      - ETCD_DATA_DIR=/var/lib/etcd        - 
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}        - ETCD_NAME={{ etcd_hostname }}        - ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }} @@ -95,11 +69,21 @@        - ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}        - ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}        - ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} -      - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -      - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt -      - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key -      - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -      - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt -      - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key -      - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -      - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +      - ETCD_CA_FILE={{ etcd_ca_file }} +      - ETCD_CERT_FILE={{ etcd_cert_file }} +      - ETCD_KEY_FILE={{ etcd_key_file }} +      - ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }} +      - ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }} +      - ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} +      - ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }} +      - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }} +      - 'ADDTL_MOUNTS=,{"type":"bind","source":"/etc/","destination":"/etc/","options":["rbind","rw","rslave"]},{"type":"bind","source":"/var/lib/etcd","destination":"/var/lib/etcd/","options":["rbind","rw","rslave"]}' + +- name: Ensure etcd datadir ownership for the system container +  file: +    path: "{{ etcd_data_dir }}" +    state: directory +    mode: 0700 +    owner: root +    group: root +    recurse: True diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 55c44bb84..b17358882 100644 --- 
a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -72,6 +72,7 @@ class CallbackModule(CallbackBase):          # Set the order of the installer phases          installer_phases = [              'installer_phase_initialize', +            'installer_phase_health',              'installer_phase_etcd',              'installer_phase_nfs',              'installer_phase_loadbalancer', @@ -93,6 +94,10 @@ class CallbackModule(CallbackBase):                  'title': 'Initialization',                  'playbook': ''              }, +            'installer_phase_health': { +                'title': 'Health Check', +                'playbook': 'playbooks/byo/openshift-checks/pre-install.yml' +            },              'installer_phase_etcd': {                  'title': 'etcd Install',                  'playbook': 'playbooks/byo/openshift-etcd/config.yml' @@ -166,11 +171,6 @@ class CallbackModule(CallbackBase):                          self._display.display(                              '\tThis phase can be restarted by running: {}'.format(                                  phase_attributes[phase]['playbook'])) -                else: -                    # Phase was not found in custom stats -                    self._display.display( -                        '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'), -                        color=C.COLOR_SKIP)          self._display.display("", screen_only=True) diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml index ff298dda0..af05d80df 100644 --- a/roles/kuryr/defaults/main.yaml +++ b/roles/kuryr/defaults/main.yaml @@ -5,10 +5,10 @@ kuryr_config_dir: /etc/kuryr  # Kuryr username  kuryr_openstack_username: kuryr -# Kuryr username domain +# Kuryr domain name or id containing user  kuryr_openstack_user_domain_name: default -# Kuryr username domain +# Kuryr domain name or id containing project  
kuryr_openstack_project_domain_name: default  # Kuryr OpenShift namespace @@ -31,7 +31,7 @@ cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/  cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"  cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9" -# Kuryr ClusterRole definiton +# Kuryr ClusterRole definition  kuryr_clusterrole:    name: kuryrctl    state: present diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index d319fdd1a..0cb749dcc 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,11 +1,4 @@  --- -- name: Remove any ansible facts created during AMI creation -  file: -    path: "/etc/ansible/facts.d/{{ item }}" -    state: absent -  with_items: -  - openshift.fact -  - name: fetch newly created instances    ec2_remote_facts:      region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index ed9c0ed0b..76aebdcea 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -9,7 +9,7 @@ write_files:    content: |      openshift_group_type: {{ openshift_aws_node_group_type }}  {%   if openshift_aws_node_group_type != 'master' %} -- path: /etc/origin/node/csr_kubeconfig +- path: /etc/origin/node/bootstrap.kubeconfig    owner: 'root:root'    permissions: '0640'    encoding: b64 diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index b3ecd57a6..0c072b64a 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -6,6 +6,5 @@ etcd_ip: "{{ openshift.common.ip }}"  etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"  etcd_cert_prefix:  etcd_cert_config_dir: "/etc/etcd" -etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc  etcd_peer_url_scheme: https  etcd_url_scheme: 
https diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 7956559c6..87e6146d4 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -1,6 +1,7 @@  """Check that there is enough disk space in predefined paths."""  import tempfile +import os.path  from openshift_checks import OpenShiftCheck, OpenShiftCheckException @@ -121,11 +122,21 @@ class DiskAvailability(OpenShiftCheck):          return {} +    def find_ansible_submounts(self, path): +        """Return a list of ansible_mounts that are below the given path.""" +        base = os.path.join(path, "") +        return [ +            mount +            for mount in self.get_var("ansible_mounts") +            if mount["mount"].startswith(base) +        ] +      def free_bytes(self, path):          """Return the size available in path based on ansible_mounts.""" +        submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))          mount = self.find_ansible_mount(path)          try: -            return mount['size_available'] +            return mount['size_available'] + submounts          except KeyError:              raise OpenShiftCheckException(                  'Unable to retrieve disk availability for "{path}".\n' diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py index 0558ddf14..6808d8b2f 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_storage.py +++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py @@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):      """      name = "docker_storage" -    tags = ["pre-install", "health", "preflight"] +    tags = ["health", "preflight"]      dependencies = ["python-docker-py"]  
    storage_drivers = ["devicemapper", "overlay", "overlay2"] diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index 29a325a17..7acdb40ec 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):              'size_available': 20 * 10**9 + 1,          }],      ), +    ( +        ['oo_masters_to_config'], +        0, +        [{ +            'mount': '/', +            'size_available': 2 * 10**9, +        }, {  # not enough directly on /var +            'mount': '/var', +            'size_available': 10 * 10**9 + 1, +        }, { +            # but subdir mounts add up to enough +            'mount': '/var/lib/docker', +            'size_available': 20 * 10**9 + 1, +        }, { +            'mount': '/var/lib/origin', +            'size_available': 20 * 10**9 + 1, +        }], +    ),  ])  def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):      task_vars = dict( @@ -104,9 +122,10 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib          ansible_mounts=ansible_mounts,      ) -    result = DiskAvailability(fake_execute_module, task_vars).run() +    check = DiskAvailability(fake_execute_module, task_vars) +    check.run() -    assert not result.get('failed', False) +    assert not check.failures  @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [ diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 0ea34faf2..6c5bb8693 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -297,6 +297,8 @@ oc delete pod --selector=<ds_selector>  Changelog  --------- +Tue Oct 26, 2017 +- Make CPU request equal limit if limit is 
greater then request  Tue Oct 10, 2017  - Default imagePullPolicy changed from Always to IfNotPresent  diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index 959573635..e1a5ea726 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -3,6 +3,7 @@  '''  import random +import re  def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): @@ -17,6 +18,31 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):      return dict(kind='emptydir') +def min_cpu(left, right): +    '''Return the minimum cpu value of the two values given''' +    message = "Unable to evaluate {} cpu value is specified correctly '{}'. Exp whole, decimal or int followed by M" +    pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$") +    millis_per_core = 1000 +    if not right: +        return left +    m_left = pattern.match(left) +    if not m_left: +        raise RuntimeError(message.format("left", left)) +    m_right = pattern.match(right) +    if not m_right: +        raise RuntimeError(message.format("right", right)) +    left_value = float(m_left.group(1)) +    right_value = float(m_right.group(1)) +    if m_left.group(2) not in ["M", "m"]: +        left_value = left_value * millis_per_core +    if m_right.group(2) not in ["M", "m"]: +        right_value = right_value * millis_per_core +    response = left +    if left_value != min(left_value, right_value): +        response = right +    return response + +  def walk(source, path, default, delimiter='.'):      '''Walk the sourch hash given the path and return the value or default if not found'''      if not isinstance(source, dict): @@ -87,6 +113,7 @@ class FilterModule(object):              'random_word': random_word,              'entry_from_named_pair': entry_from_named_pair,              'map_from_pairs': map_from_pairs, +           
 'min_cpu': min_cpu,              'es_storage': es_storage,              'serviceaccount_name': serviceaccount_name,              'serviceaccount_namespace': serviceaccount_namespace, diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test index 3ad956cca..bac25c012 100644 --- a/roles/openshift_logging/filter_plugins/test +++ b/roles/openshift_logging/filter_plugins/test @@ -1,7 +1,22 @@  import unittest  from openshift_logging import walk +from openshift_logging import min_cpu  class TestFilterMethods(unittest.TestCase): +     + +    def test_min_cpu_for_none(self): +        source = "1000M" +        self.assertEquals(min_cpu(source, None), "1000M") + +    def test_min_cpu_for_millis(self): +        source = "1" +        self.assertEquals(min_cpu(source, "0.1"), "0.1") + + +    def test_min_cpu_for_whole(self): +        source = "120M" +        self.assertEquals(min_cpu(source, "2"), "120M")      def test_walk_find_key(self): diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 3f705d02c..b98e281a3 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -77,21 +77,23 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" -    openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" -    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" +    openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"      
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" -    openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" -    openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" -    _es_containers: "{{item.0.containers}}" +    openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" +    openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}" +    _es_containers: "{{ outer_item.0.containers}}"      _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"    with_together:    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"    - "{{ openshift_logging_facts.elasticsearch.pvcs }}"    - "{{ es_indices }}" +  loop_control: +    loop_var: outer_item    when:    - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0 @@ -101,13 +103,15 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" -    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"      
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"      openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"    with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} +  loop_control: +    loop_var: outer_item  - set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}    with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} @@ -131,8 +135,8 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" -    openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}" -    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" +    openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"      openshift_logging_elasticsearch_ops_deployment: true      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -143,8 +147,8 @@      openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"      openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"      openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}" -    openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}" -    openshift_logging_elasticsearch_storage_group: "{{ 
[openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}" +    openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" +    openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"      openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"      openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"      openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" @@ -153,13 +157,16 @@      openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"      openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"      openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}" -    _es_containers: "{{item.0.containers}}" +    _es_containers: "{{ outer_item.0.containers}}"      _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"    with_together:    - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"    - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"    - "{{ es_ops_indices }}" +  loop_control: +    loop_var: outer_item +    when:    - openshift_logging_use_ops | bool    - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0 @@ -170,7 +177,7 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" -    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + 
openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"      openshift_logging_elasticsearch_ops_deployment: true      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" @@ -190,6 +197,8 @@      openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"    with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} +  loop_control: +    loop_var: outer_item    when:    - openshift_logging_use_ops | bool diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index b4ddf45d9..fcaf18ed4 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -90,7 +90,7 @@      es_host: "{{ openshift_logging_curator_es_host }}"      es_port: "{{ openshift_logging_curator_es_port }}"      curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" -    curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}" +    curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}"      curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"      curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"      curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh deleted file mode 100644 index 339b5a1b2..000000000 --- a/roles/openshift_logging_elasticsearch/files/es_migration.sh +++ /dev/null @@ -1,79 +0,0 @@ -CA=${1:-/etc/openshift/logging/ca.crt} 
-KEY=${2:-/etc/openshift/logging/system.admin.key} -CERT=${3:-/etc/openshift/logging/system.admin.crt} -openshift_logging_es_host=${4:-logging-es} -openshift_logging_es_port=${5:-9200} -namespace=${6:-logging} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# skip indices that contain a uuid -# get a list of unique project -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_indices() { -    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ -        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ -        '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ -    sort -u -} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_proj_uuid_indices() { -    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ -        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ -            '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ -        sort -u -} - -if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then -  echo "No Elasticsearch pods found running.  Cannot update common data model." -  exit 1 -fi - -count=$(get_list_of_indices | wc -l) -if [ $count -eq 0 ]; then -  echo No matching indices found - skipping update_for_uuid -else -  echo Creating aliases for $count index patterns . . . 
-  { -    echo '{"actions":[' -    get_list_of_indices | \ -      while IFS=. read proj ; do -        # e.g. make test.uuid.* an alias of test.* so we can search for -        # /test.uuid.*/_search and get both the test.uuid.* and -        # the test.* indices -        uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null) -        [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}" -      done -    echo ']}' -  } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" -fi - -count=$(get_list_of_proj_uuid_indices | wc -l) -if [ $count -eq 0 ] ; then -    echo No matching indexes found - skipping update_for_common_data_model -    exit 0 -fi - -echo Creating aliases for $count index patterns . . . -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -{ -  echo '{"actions":[' -  get_list_of_proj_uuid_indices | \ -    while IFS=. read proj uuid ; do -      # e.g. 
make project.test.uuid.* and alias of test.uuid.* so we can search for -      # /project.test.uuid.*/_search and get both the test.uuid.* and -      # the project.test.uuid.* indices -      echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}" -    done -  echo ']}' -} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 7aabdc861..e7ef443bd 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -354,7 +354,7 @@      image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"      proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"      es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}" -    es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}" +    es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}"      es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"      es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"      es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}" diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index f56810610..2f89c3f9f 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -154,7 +154,6 @@        path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"  # create Fluentd daemonset -  # this should change based on the type of fluentd 
deployment to be done...  # TODO: pass in aggregation configurations  - name: Generate logging-fluentd daemonset definition @@ -173,7 +172,7 @@      fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"      fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"      fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" -    fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}" +    fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"      fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"      audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"      audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}" diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 809f7a631..8ef8ede9a 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -230,10 +230,10 @@      es_host: "{{ openshift_logging_kibana_es_host }}"      es_port: "{{ openshift_logging_kibana_es_port }}"      kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" -    kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}" +    kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}"      kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"      kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}" -    kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}" +    kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}"      kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"      kibana_replicas: 
"{{ openshift_logging_kibana_replicas | default (1) }}"      kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 1b46a7ac3..5b257139e 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -171,7 +171,7 @@      ops_host: "{{ openshift_logging_mux_ops_host }}"      ops_port: "{{ openshift_logging_mux_ops_port }}"      mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}" -    mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}" +    mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}"      mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"      mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"      mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}" diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 3a71d9211..05ca27913 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`):           * [Cloud Provider](#cloud-provider)           * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)     * [Customization](#customization) +   * [Container Provider](#container-provider) +      * [Manually](#manually) +      * [Automatically](#automatically) +      * [Multiple Providers](#multiple-providers)     * [Uninstall](#uninstall)     * [Additional Information](#additional-information) @@ -80,30 +84,10 @@ to there being no databases that require pods.  *Be extra careful* if you are overriding template  parameters. Including parameters not defined in a template **will -cause errors**. 
- -**Container Provider Integration** - If you want add your container -platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you -must ensure that the infrastructure management hooks are installed. - -* During your OCP/Origin install, ensure that you have the -  `openshift_use_manageiq` parameter set to `true` in your inventory -  at install time. This will create a `management-infra` project and a -  service account user. -* After CFME/MIQ is installed, obtain the `management-admin` service -  account token and copy it somewhere safe. - -```bash -$ oc serviceaccounts get-token -n management-infra management-admin -eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig -``` - -* In the CFME/MIQ web interface, navigate to `Compute` → -  `Containers` → `Providers` and select `⚙ Configuration` → `⊕ -  Add a new Containers Provider` - -*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.* - +cause errors**. 
If you do receive an error during the `Ensure the CFME +App is created` task, we recommend running the +[uninstall scripts](#uninstall) first before running the installer +again.  # Requirements @@ -140,11 +124,13 @@ used in your Ansible inventory to control the behavior of this  installer. -| Variable                                       | Required | Default                        | Description                         | -|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| -| `openshift_management_project`                       | **No**   | `openshift-management`               | Namespace for the installation.     | +| Variable                                             | Required | Default                        | Description                         | +|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| +| `openshift_management_project`                       | **No**   | `openshift-management`         | Namespace for the installation.     |  | `openshift_management_project_description`           | **No**   | *CloudForms Management Engine* | Namespace/project description.      | -| `openshift_management_install_management`                  | **No**   | `false`                        | Boolean, set to `true` to install the application | +| `openshift_management_install_management`            | **No**   | `false`                        | Boolean, set to `true` to install the application | +| `openshift_management_username`                      | **No**   | `admin`                        | Default management username. Changing this value **does not change the username**. 
Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) | +| `openshift_management_password`                      | **No**   | `smartvm`                      | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) |  | **PRODUCT CHOICE**  | | | | |  | `openshift_management_app_template`                  | **No**   | `miq-template`                 | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |  | **STORAGE CLASSES** | | | | | @@ -268,6 +254,9 @@ openshift_management_app_template=cfme-template-ext-db  openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}  ``` +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully. +  # Limitations  This release is the first OpenShift CFME release in the OCP 3.7 @@ -318,6 +307,9 @@ inventory. The following keys are required:  * `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*  * `DATABASE_NAME` +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully. +  Your inventory would contain a line similar to this:  ```ini @@ -453,6 +445,116 @@ hash. 
This applies to **CloudForms** installations as well:  [cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),  [cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml). +# Container Provider + +There are two methods for enabling container provider integration. You +can manually add OCP/Origin as a container provider, or you can try +the playbooks included with this role. + +## Manually + +See the online documentation for steps to manually add your cluster as +a container provider: + +* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers) + +## Automatically + +Automated container provider integration can be accomplished using the +playbooks included with this role. + +This playbook will: + +1. Gather the necessary authentication secrets +1. Find the public routes to the Management app and the cluster API +1. Make a REST call to add this cluster as a container provider + + +``` +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml +``` + +## Multiple Providers + +As well as providing playbooks to integrate your *current* container +platform into the management service, this role includes a **tech +preview** script which allows you to add multiple container platforms +as container providers in any arbitrary MIQ/CFME server. + +Using the multiple-provider script requires manual configuration and +setting an `EXTRA_VARS` parameter on the command-line. + + +1. Copy the +   [container_providers.yml](files/examples/container_providers.yml) +   example somewhere, such as `/tmp/cp.yml` +1. If you changed your CFME/MIQ name or password, update the +   `hostname`, `user`, and `password` parameters in the +   `management_server` key in the `container_providers.yml` file copy +1. 
Fill in an entry under the `container_providers` key for *each* OCP +   or Origin cluster you want to add as container providers + +**Parameters Which MUST Be Configured:** + +* `auth_key` - This is the token of a service account which has admin capabilities on the cluster. +* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname. +* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique. + +*Note*: You can obtain the `auth_key` bearer token from your clusters + with this command: `oc serviceaccounts get-token -n management-infra + management-admin` + +**Parameters Which MAY Be Configured:** + +* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443` +* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time. 
+ + +Let's see an example describing the following scenario: + +* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml` +* You're adding two OCP clusters +* Your management server runs on `mgmt.example.com` + +You would customize `/tmp/cp.yml` as such: + +```yaml +--- +container_providers: +  - connection_configurations: +      - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} +        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} +    hostname: "ocp-prod.example.com" +    name: OCP Production +    port: 8443 +    type: "ManageIQ::Providers::Openshift::ContainerManager" +  - connection_configurations: +      - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} +        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} +    hostname: "ocp-test.example.com" +    name: OCP Testing +    port: 8443 +    type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: +  hostname: "mgmt.example.com" +  user: admin +  password: b3tt3r_p4SSw0rd +``` + +Then you will run the many-container-providers integration script. You +**must** provide the path to the container providers configuration +file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e` +(or `--extra-vars`) parameter to set `container_providers_config` to +the config file path. + +``` +$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ +      playbooks/byo/openshift-management/add_many_container_providers.yml +``` + +Afterwards you will find two new container providers in your +management service. Navigate to `Compute` → `Containers` → `Providers` +to see an overview.  # Uninstall @@ -461,6 +563,11 @@ installation:  * `playbooks/byo/openshift-management/uninstall.yml` +NFS export definitions and data stored on NFS exports are not +automatically removed. 
You are urged to manually erase any data from +old application or database deployments before attempting to +initialize a new deployment. +  # Additional Information  The upstream project, diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index ebb56313f..8ba65b386 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports  openshift_management_storage_nfs_local_hostname: false  ###################################################################### +# DEFAULT ACCOUNT INFORMATION +###################################################################### +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. 
+# +# For example, adding this cluster as a container provider, +# playbooks/byo/openshift-management/add_container_provider.yml +openshift_management_username: admin +openshift_management_password: smartvm + +######################################################################  # SCAFFOLDING - These are parameters we pre-seed that a user may or  # may not set later  ###################################################################### diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml new file mode 100644 index 000000000..661f62e4d --- /dev/null +++ b/roles/openshift_management/files/examples/container_providers.yml @@ -0,0 +1,22 @@ +--- +container_providers: +  - connection_configurations: +      - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} +        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} +    hostname: "OCP/Origin cluster hostname (providing API access)" +    name: openshift-management +    port: 8443 +    type: "ManageIQ::Providers::Openshift::ContainerManager" +# Copy and update for as many OCP or Origin providers as you want to +# add to your management service +  # - connection_configurations: +  #     - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} +  #       endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} +  #   hostname: "OCP/Origin cluster hostname (providing API access)" +  #   name: openshift-management +  #   port: 8443 +  #   type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: +  hostname: "Management server hostname (providing API access)" +  user: admin +  password: smartvm diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py new file mode 100644 index 
000000000..3b7013d9a --- /dev/null +++ b/roles/openshift_management/filter_plugins/oo_management_filters.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +""" +Filter methods for the management role +""" + + +def oo_filter_container_providers(results): +    """results - the result from posting the API calls for adding new +providers""" +    all_results = [] +    for result in results: +        if 'results' in result['json']: +            # We got an OK response +            res = result['json']['results'][0] +            all_results.append("Provider '{}' - Added successfully".format(res['name'])) +        elif 'error' in result['json']: +            # This was a problem +            all_results.append("Provider '{}' - Failed to add. Message: {}".format( +                result['item']['name'], result['json']['error']['message'])) +    return all_results + + +class FilterModule(object): +    """ Custom ansible filter mapping """ + +    # pylint: disable=no-self-use, too-few-public-methods +    def filters(self): +        """ returns a mapping of filters to methods """ +        return { +            "oo_filter_container_providers": oo_filter_container_providers, +        } diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml new file mode 100644 index 000000000..383e6edb5 --- /dev/null +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -0,0 +1,65 @@ +--- +- name: Ensure lib_openshift modules are available +  include_role: +    role: lib_openshift + +- name: Ensure OpenShift facts module is available +  include_role: +    role: openshift_facts + +- name: Ensure OpenShift facts are loaded +  openshift_facts: + +- name: Ensure the management SA Secrets are read +  oc_serviceaccount_secret: +    state: list +    service_account: management-admin +    namespace: management-infra +  register: sa + +- name: Ensure the management SA bearer token is identified +  
set_fact: +    management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +- name: Ensure the SA bearer token value is read +  oc_secret: +    state: list +    name: "{{ management_token }}" +    namespace: management-infra +    decode: true +  no_log: True +  register: sa_secret + +- name: Ensure the SA bearer token value is saved +  set_fact: +    management_bearer_token: "{{ sa_secret.results.decoded.token }}" + +- name: Ensure we have the public route to the management service +  oc_route: +    state: list +    name: httpd +    namespace: openshift-management +  register: route + +- name: Ensure the management service route is saved +  set_fact: +    management_route: "{{ route.results.0.spec.host }}" + +- name: Ensure this cluster is a container provider +  uri: +    url: "https://{{ management_route }}/api/providers" +    body_format: json +    method: POST +    user: "{{ openshift_management_username }}" +    password: "{{ openshift_management_password }}" +    validate_certs: no +    # Docs on formatting the BODY of the POST request: +    # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations +    body: +      connection_configurations: +        - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken} +          endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} +      hostname: "{{ openshift.master.cluster_public_hostname }}" +      name: "{{ openshift_management_project }}" +      port: "{{ openshift.master.api_port }}" +      type: "ManageIQ::Providers::Openshift::ContainerManager" diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index 86c4d0010..9be923a57 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -2,23 +2,33 @@  ######################################################################)  # Users, projects, and 
privileges -- name: Run pre-install CFME validation checks +- name: Run pre-install Management validation checks    include: validate.yml -- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists" +# This creates a service account allowing Container Provider +# integration (managing OCP/Origin via MIQ/Management) +- name: Enable Container Provider Integration +  include_role: +    role: openshift_manageiq + +- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"    oc_project:      state: present      name: "{{ openshift_management_project }}"      display_name: "{{ openshift_management_project_description }}" -- name: Create and Authorize CFME Accounts +- name: Create and Authorize Management Accounts    include: accounts.yml  ######################################################################  # STORAGE - Initialize basic storage class +- name: Determine the correct NFS host if required +  include: storage/nfs_server.yml +  when: openshift_management_storage_class in ['nfs', 'nfs_external'] +  #---------------------------------------------------------------------  # * nfs - set up NFS shares on the first master for a proof of concept -- name: Create required NFS exports for CFME app storage +- name: Create required NFS exports for Management app storage    include: storage/nfs.yml    when: openshift_management_storage_class == 'nfs' @@ -45,7 +55,7 @@  ######################################################################  # APPLICATION TEMPLATE -- name: Install the CFME app and PV templates +- name: Install the Management app and PV templates    include: template.yml  ###################################################################### @@ -71,9 +81,16 @@    when:      - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App is created +- name: Ensure the Management App is created    oc_process:      namespace: "{{ openshift_management_project }}"      
template_name: "{{ openshift_management_template_name }}"      create: True      params: "{{ openshift_management_template_parameters }}" + +- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max +  command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" +  register: app_seeding_logs +  until: app_seeding_logs.stdout.find('Server starting complete') != -1 +  delay: 30 +  retries: 20 diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_management/tasks/noop.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index 31c845725..d1b9a8d5c 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -26,7 +26,7 @@        when:          - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined -- name: Check if the CFME App PV has been created +- name: Check if the Management App PV has been created    oc_obj:      namespace: "{{ openshift_management_project }}"      state: list @@ -34,7 +34,7 @@      name: "{{ openshift_management_flavor_short }}-app"    register: miq_app_pv_check -- name: Check if the CFME DB PV has been created +- name: Check if the Management DB PV has been created    oc_obj:      namespace: "{{ openshift_management_project }}"      state: list @@ -44,7 +44,7 @@    when:      - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App PV is created +- name: Ensure the Management App PV is created    oc_process:      namespace: "{{ openshift_management_project }}"      template_name: "{{ openshift_management_flavor }}-app-pv" @@ -55,7 +55,7 @@        NFS_HOST: "{{ openshift_management_nfs_server }}"    
when: miq_app_pv_check.results.results == [{}] -- name: Ensure the CFME DB PV is created +- name: Ensure the Management DB PV is created    oc_process:      namespace: "{{ openshift_management_project }}"      template_name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 696808328..94e11137c 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -2,37 +2,6 @@  # Tasks to statically provision NFS volumes  # Include if not using dynamic volume provisioning -- name: Ensure we save the local NFS server if one is provided -  set_fact: -    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" -  when: -    - openshift_management_storage_nfs_local_hostname is defined -    - openshift_management_storage_nfs_local_hostname != False -    - openshift_management_storage_class == "nfs" - -- name: Ensure we save the local NFS server -  set_fact: -    openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" -  when: -    - openshift_management_nfs_server is not defined -    - openshift_management_storage_class == "nfs" - -- name: Ensure we save the external NFS server -  set_fact: -    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" -  when: -    - openshift_management_storage_class == "nfs_external" - -- name: Failed NFS server detection -  assert: -    that: -      - openshift_management_nfs_server is defined -    msg: | -      "Unable to detect an NFS server. The 'nfs_external' -      openshift_management_storage_class option requires that you set -      openshift_management_storage_nfs_external_hostname. 
NFS hosts detected -      for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" -  - name: Setting up NFS storage    block:      - name: Include the NFS Setup role tasks diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml new file mode 100644 index 000000000..96a742c83 --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs_server.yml @@ -0,0 +1,31 @@ +--- +- name: Ensure we save the local NFS server if one is provided +  set_fact: +    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" +  when: +    - openshift_management_storage_nfs_local_hostname is defined +    - openshift_management_storage_nfs_local_hostname != False +    - openshift_management_storage_class == "nfs" + +- name: Ensure we save the local NFS server +  set_fact: +    openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" +  when: +    - openshift_management_nfs_server is not defined +    - openshift_management_storage_class == "nfs" + +- name: Ensure we save the external NFS server +  set_fact: +    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" +  when: +    - openshift_management_storage_class == "nfs_external" + +- name: Failed NFS server detection +  assert: +    that: +      - openshift_management_nfs_server is defined +    msg: | +      "Unable to detect an NFS server. The 'nfs_external' +      openshift_management_storage_class option requires that you set +      openshift_management_storage_nfs_external_hostname. 
NFS hosts detected +      for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 299158ac4..9f97cdcb9 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -15,7 +15,7 @@  # STANDARD PODIFIED DATABASE TEMPLATE  - when: openshift_management_app_template in ['miq-template', 'cfme-template']    block: -  - name: Check if the CFME Server template has been created already +  - name: Check if the Management Server template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list @@ -25,12 +25,12 @@    - when: miq_server_check.results.results == [{}]      block: -    - name: Copy over CFME Server template +    - name: Copy over Management Server template        copy:          src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"          dest: "{{ template_dir }}/" -    - name: Ensure CFME Server Template is created +    - name: Ensure Management Server Template is created        oc_obj:          namespace: "{{ openshift_management_project }}"          name: "{{ openshift_management_flavor }}" @@ -41,9 +41,9 @@  ######################################################################  # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template'] +- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']    block: -  - name: Check if the CFME Ext-DB Server template has been created already +  - name: Check if the Management Ext-DB Server template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list @@ -53,12 +53,12 @@    - when: miq_ext_db_server_check.results.results == [{}]      block: -    - name: Copy over CFME Ext-DB Server template +  
  - name: Copy over Management Ext-DB Server template        copy:          src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"          dest: "{{ template_dir }}/" -    - name: Ensure CFME Ext-DB Server Template is created +    - name: Ensure Management Ext-DB Server Template is created        oc_obj:          namespace: "{{ openshift_management_project }}"          name: "{{ openshift_management_flavor }}-ext-db" @@ -74,7 +74,7 @@  # Begin conditional PV template creations  # Required for the application server -- name: Check if the CFME App PV template has been created already +- name: Check if the Management App PV template has been created already    oc_obj:      namespace: "{{ openshift_management_project }}"      state: list @@ -84,12 +84,12 @@  - when: miq_app_pv_check.results.results == [{}]    block: -  - name: Copy over CFME App PV template +  - name: Copy over Management App PV template      copy:        src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"        dest: "{{ template_dir }}/" -  - name: Ensure CFME App PV Template is created +  - name: Ensure Management App PV Template is created      oc_obj:        namespace: "{{ openshift_management_project }}"        name: "{{ openshift_management_flavor }}-app-pv" @@ -103,7 +103,7 @@  # Required for database if the installation is fully podified  - when: openshift_management_app_template in ['miq-template', 'cfme-template']    block: -  - name: Check if the CFME DB PV template has been created already +  - name: Check if the Management DB PV template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list @@ -113,12 +113,12 @@    - when: miq_db_pv_check.results.results == [{}]      block: -    - name: Copy over CFME DB PV template +    - name: Copy over Management DB PV template        copy:          src: "templates/{{ 
openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"          dest: "{{ template_dir }}/" -    - name: Ensure CFME DB PV Template is created +    - name: Ensure Management DB PV Template is created        oc_obj:          namespace: "{{ openshift_management_project }}"          name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml index f79955e95..e2edd5ef4 100644 --- a/roles/openshift_master/tasks/journald.yml +++ b/roles/openshift_master/tasks/journald.yml @@ -3,6 +3,11 @@    stat: path=/etc/systemd/journald.conf    register: journald_conf_file +- name: Create journald persistence directories +  file: +    path: /var/log/journal +    state: directory +  - name: Update journald setup    replace:      dest: /etc/systemd/journald.conf diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index d0bc79c0c..48b34c578 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -297,14 +297,13 @@    - openshift.master.cluster_method == 'native'    - master_api_service_status_changed | bool -- name: Start and enable master controller on first master +- name: Start and enable master controller service    systemd:      name: "{{ openshift.common.service_type }}-master-controllers"      enabled: yes      state: started    when:    - openshift.master.cluster_method == 'native' -  - inventory_hostname == openshift_master_hosts[0]    register: l_start_result    until: not l_start_result | failed    retries: 1 @@ -315,31 +314,8 @@    when:    - l_start_result | failed -- name: Wait for master controller service to start on first master -  pause: -    seconds: 15 -  when: -  - openshift.master.cluster_method == 'native' - -- name: Start and enable master controller on all masters -  systemd: -    name: "{{ openshift.common.service_type }}-master-controllers" -    enabled: yes - 
   state: started -  when: -  - openshift.master.cluster_method == 'native' -  - inventory_hostname != openshift_master_hosts[0] -  register: l_start_result -  until: not l_start_result | failed -  retries: 1 -  delay: 60 - -- name: Dump logs from master-controllers if it failed -  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers -  when: -  - l_start_result | failed - -- set_fact: +- name: Set fact master_controllers_service_status_changed +  set_fact:      master_controllers_service_status_changed: "{{ l_start_result | changed }}"    when:    - openshift.master.cluster_method == 'native' diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 40775571f..a1a0bfaa9 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -179,6 +179,11 @@ masterPublicURL: {{ openshift.master.public_api_url }}  networkConfig:    clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} +{% if openshift.common.version_gte_3_7 | bool %} +  clusterNetworks: +  - cidr: {{ openshift.master.sdn_cluster_network_cidr }} +    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} +{% endif %}  {% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}    networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}  {% endif %} diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml index 39323904f..b0b888d56 100644 --- a/roles/openshift_nfs/tasks/create_export.yml +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -12,7 +12,7 @@  #   l_nfs_export_name: Name of sub-directory of the export  #   l_nfs_options: Mount Options -- name: 
Ensure CFME App NFS export directory exists +- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"    file:      path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"      state: directory diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index 8c03f6c41..2deb005da 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -25,7 +25,7 @@      state: "{{ item.state | default('present') }}"    with_items:    # add the kubeconfig -  - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig" +  - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig"      regexp: "^KUBECONFIG=.*"    # remove the config file.  This comes from openshift_facts    - regexp: "^CONFIG_FILE=.*" diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml index eae832fcf..ebcff46b5 100644 --- a/roles/openshift_node_dnsmasq/defaults/main.yml +++ b/roles/openshift_node_dnsmasq/defaults/main.yml @@ -1,2 +1,7 @@  ---  openshift_node_dnsmasq_install_network_manager_hook: true + +# lo must always be present in this list or dnsmasq will conflict with +# the node's dns service. 
+openshift_node_dnsmasq_except_interfaces: +- lo diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 index ef3ba2880..5c9601277 100644 --- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 +++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 @@ -4,4 +4,7 @@ no-negcache  max-cache-ttl=1  enable-dbus  bind-interfaces -listen-address={{ openshift.node.dns_ip }} +{% for interface in openshift_node_dnsmasq_except_interfaces %} +except-interface={{ interface }} +{% endfor %} +# End of config diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 74c1a51a8..a6c168bc7 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -23,6 +23,8 @@      # TODO: once this is well-documented, add deprecation notice if using old name.      deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"      openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" +    deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" +    openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"  - name: Abort when deployment type is invalid    # this variable is required; complain early and clearly if it is invalid. @@ -45,7 +47,7 @@  - name: Abort when openshift_release is invalid    when:      - openshift_release is defined -    - not openshift_release | match('\d+(\.\d+){1,3}$') +    - not openshift_release | match('^\d+(\.\d+){1,3}$')    fail:      msg: |-        openshift_release is "{{ openshift_release }}" which is not a valid version string. 
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index 5d5352c1c..0e5bb7230 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -24,6 +24,7 @@ spec:  {% endfor %}        containers:        - args: +        - apiserver          - --storage-type          - etcd          - --secure-port @@ -45,7 +46,7 @@ spec:          - --feature-gates          - OriginatingIdentity=true          image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} -        command: ["/usr/bin/apiserver"] +        command: ["/usr/bin/service-catalog"]          imagePullPolicy: Always          name: apiserver          ports: diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index 2272cbb44..e5e5f6b50 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -29,6 +29,7 @@ spec:              fieldRef:                fieldPath: metadata.namespace          args: +        - controller-manager          - -v          - "5"          - --leader-election-namespace @@ -38,7 +39,7 @@ spec:          - --feature-gates          - OriginatingIdentity=true          image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} -        command: ["/usr/bin/controller-manager"] +        command: ["/usr/bin/service-catalog"]          imagePullPolicy: Always          name: controller-manager          ports: diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml new file mode 100644 index 000000000..7b705c2d4 --- /dev/null +++ 
b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml @@ -0,0 +1,135 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: deploy-heketi +  labels: +    glusterfs: heketi-template +    deploy-heketi: support +  annotations: +    description: Bootstrap Heketi installation +    tags: glusterfs,heketi,installation +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-service +      deploy-heketi: support +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: deploy-heketi-${CLUSTER_NAME} +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: ${HEKETI_ROUTE} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-route +      deploy-heketi: support +  spec: +    to: +      kind: Service +      name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc +      deploy-heketi: support +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: deploy-heketi +        labels: +          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +          deploy-heketi: support +      spec: +        serviceAccountName: heketi-${CLUSTER_NAME}-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +  
          value: ${HEKETI_EXECUTOR} +          - name: HEKETI_FSTAB +            value: /var/lib/heketi/fstab +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          - name: HEKETI_KUBE_NAMESPACE +            value: ${HEKETI_KUBE_NAMESPACE} +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          - name: config +            mountPath: /etc/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +        - name: config +          secret: +            secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR +  displayName: heketi executor type +  description: Set the executor type, kubernetes or ssh +  value: kubernetes +- name: HEKETI_KUBE_NAMESPACE +  displayName: Namespace +  description: Set the namespace where the GlusterFS pods reside +  value: default +- name: HEKETI_ROUTE +  displayName: heketi route name +  description: Set the hostname for the route URL +  value: "heketi-glusterfs" +- name: IMAGE_NAME +  displayName: heketi container image name +  required: True +- name: IMAGE_VERSION +  displayName: heketi container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to 
identify this heketi service, useful for running multiple heketi instances +  value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: glusterfs +  labels: +    glusterfs: template +  annotations: +    description: GlusterFS DaemonSet template +    tags: glusterfs +objects: +- kind: DaemonSet +  apiVersion: extensions/v1beta1 +  metadata: +    name: glusterfs-${CLUSTER_NAME} +    labels: +      glusterfs: ${CLUSTER_NAME}-daemonset +    annotations: +      description: GlusterFS DaemonSet +      tags: glusterfs +  spec: +    selector: +      matchLabels: +        glusterfs: ${CLUSTER_NAME}-pod +    template: +      metadata: +        name: glusterfs-${CLUSTER_NAME} +        labels: +          glusterfs: ${CLUSTER_NAME}-pod +          glusterfs-node: pod +      spec: +        nodeSelector: "${{NODE_LABELS}}" +        hostNetwork: true +        containers: +        - name: glusterfs +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          volumeMounts: +          - name: glusterfs-heketi +            mountPath: "/var/lib/heketi" +          - name: glusterfs-run +            mountPath: "/run" +          - name: glusterfs-lvm +            mountPath: "/run/lvm" +          - name: glusterfs-etc +            mountPath: "/etc/glusterfs" +          - name: glusterfs-logs +            mountPath: "/var/log/glusterfs" +          - name: glusterfs-config +            mountPath: "/var/lib/glusterd" +          - name: glusterfs-dev +            mountPath: "/dev" +          - name: glusterfs-misc +            mountPath: "/var/lib/misc/glusterfsd" +          - name: glusterfs-cgroup +            mountPath: "/sys/fs/cgroup" 
+            readOnly: true +          - name: glusterfs-ssl +            mountPath: "/etc/ssl" +            readOnly: true +          securityContext: +            capabilities: {} +            privileged: true +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 40 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 25 +            successThreshold: 1 +            failureThreshold: 15 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 40 +            exec: +              command: +              - "/bin/bash" +              - "-c" +              - systemctl status glusterd.service +            periodSeconds: 25 +            successThreshold: 1 +            failureThreshold: 15 +          resources: {} +          terminationMessagePath: "/dev/termination-log" +        volumes: +        - name: glusterfs-heketi +          hostPath: +            path: "/var/lib/heketi" +        - name: glusterfs-run +          emptyDir: {} +        - name: glusterfs-lvm +          hostPath: +            path: "/run/lvm" +        - name: glusterfs-etc +          hostPath: +            path: "/etc/glusterfs" +        - name: glusterfs-logs +          hostPath: +            path: "/var/log/glusterfs" +        - name: glusterfs-config +          hostPath: +            path: "/var/lib/glusterd" +        - name: glusterfs-dev +          hostPath: +            path: "/dev" +        - name: glusterfs-misc +          hostPath: +            path: "/var/lib/misc/glusterfsd" +        - name: glusterfs-cgroup +          hostPath: +            path: "/sys/fs/cgroup" +        - name: glusterfs-ssl +          hostPath: +            path: "/etc/ssl" +        restartPolicy: Always +        terminationGracePeriodSeconds: 30 +        dnsPolicy: ClusterFirst +        securityContext: {} +parameters: +- name: 
NODE_LABELS +  displayName: Daemonset Node Labels +  description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' +  value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME +  displayName: GlusterFS container image name +  required: True +- name: IMAGE_VERSION +  displayName: GlusterFS container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances +  value: storage diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: heketi +  labels: +    glusterfs: heketi-template +  annotations: +    description: Heketi service deployment template +    tags: glusterfs,heketi +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: heketi-${CLUSTER_NAME} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-service +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: heketi +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: ${HEKETI_ROUTE} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-route +  spec: +    to: +      kind: Service +      name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: heketi-${CLUSTER_NAME} +    labels: +      glusterfs: heketi-${CLUSTER_NAME}-dc +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: heketi-${CLUSTER_NAME}-pod +    
triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: heketi-${CLUSTER_NAME} +        labels: +          glusterfs: heketi-${CLUSTER_NAME}-pod +      spec: +        serviceAccountName: heketi-${CLUSTER_NAME}-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          imagePullPolicy: IfNotPresent +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: ${HEKETI_EXECUTOR} +          - name: HEKETI_FSTAB +            value: /var/lib/heketi/fstab +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          - name: HEKETI_KUBE_NAMESPACE +            value: ${HEKETI_KUBE_NAMESPACE} +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          - name: config +            mountPath: /etc/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - name: db +          glusterfs: +            endpoints: heketi-db-${CLUSTER_NAME}-endpoints +            path: heketidbstorage +        - name: config +          secret: +            secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY +  displayName: Heketi User Secret +  description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY +  displayName: Heketi Administrator Secret +  description: Set 
secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR +  displayName: heketi executor type +  description: Set the executor type, kubernetes or ssh +  value: kubernetes +- name: HEKETI_KUBE_NAMESPACE +  displayName: Namespace +  description: Set the namespace where the GlusterFS pods reside +  value: default +- name: HEKETI_ROUTE +  displayName: heketi route name +  description: Set the hostname for the route URL +  value: "heketi-glusterfs" +- name: IMAGE_NAME +  displayName: heketi container image name +  required: True +- name: IMAGE_VERSION +  displayName: heketi container image version +  required: True +- name: CLUSTER_NAME +  displayName: GlusterFS cluster name +  description: A unique name to identify this heketi service, useful for running multiple heketi instances +  value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 074904bec..54a6dd7c3 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@  ---  - name: Create heketi DB volume -  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json" +  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"    register: setup_storage  - name: Copy heketi-storage list diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml new file mode 100644 index 000000000..030fa81c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure device mapper modules loaded +  template: +    src: glusterfs.conf +    dest: /etc/modules-load.d/glusterfs.conf +  register: km + +- name: 
load kernel modules +  systemd: +    name: systemd-modules-load.service +    state: restarted +  when: km | changed diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf new file mode 100644 index 000000000..dd4d6e6f7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf @@ -0,0 +1,4 @@ +#{{ ansible_managed }} +dm_thin_pool +dm_snapshot +dm_mirror
\ No newline at end of file diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: +  name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} +  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} +  ports: +  - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: +  name: glusterfs-{{ glusterfs_name }}-endpoints +spec: +  ports: +  - port: 1 +status: +  loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..454e84aaf --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: +  name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: +  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" +  restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} +  secretNamespace: "{{ glusterfs_namespace }}" +  secretName: "heketi-{{ glusterfs_name 
}}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: +  name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} +  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} +  ports: +  - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: +  name: heketi-db-{{ glusterfs_name }}-endpoints +spec: +  ports: +  - port: 1 +status: +  loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 @@ -0,0 +1,36 @@ +{ +	"_port_comment": "Heketi Server Port Number", +	"port" : "8080", + +	"_use_auth": "Enable JWT authorization. Please enable for deployment", +	"use_auth" : false, + +	"_jwt" : "Private keys for access", +	"jwt" : { +		"_admin" : "Admin has access to all APIs", +		"admin" : { +			"key" : "My Secret" +		}, +		"_user" : "User only has access to /volumes endpoint", +		"user" : { +			"key" : "My Secret" +		} +	}, + +	"_glusterfs_comment": "GlusterFS Configuration", +	"glusterfs" : { + +		"_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", +		"executor" : "{{ glusterfs_heketi_executor }}", + +		"_db_comment": "Database file name", +		"db" : "/var/lib/heketi/heketi.db", + +		"sshexec" : { +			"keyfile" : "/etc/heketi/private_key", +			"port" : "{{ glusterfs_heketi_ssh_port }}", +			"user" : "{{ glusterfs_heketi_ssh_user }}", +			"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} +		} +	} +} diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 @@ -0,0 +1,49 @@ +{ +  "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} +  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} +  {%- if cluster in clusters -%} +    {%- set _dummy = clusters[cluster].append(node) -%} +  {%- else -%} +    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} +  {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} +    { +      "nodes": [ +{%- for node in clusters[cluster] -%} +        { +          "node": { +            "hostnames": { +              "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} +                "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} +                "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} +                "{{ node }}" +{%- endif -%} +              ], +              "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} +                "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} +                "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} +              ] +            }, +            "zone": {{ hostvars[node].glusterfs_zone | default(1) }} +          }, +          "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} +            "{{ device 
}}"{% if not loop.last %},{% endif %} +{%- endfor -%} +          ] +        }{% if not loop.last %},{% endif %} +{%- endfor -%} +      ] +    }{% if not loop.last %},{% endif %} +{%- endfor -%} +  ] +} diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index 53d10f1f8..01a1a7472 100644 --- a/roles/openshift_version/defaults/main.yml +++ b/roles/openshift_version/defaults/main.yml @@ -1,3 +1,2 @@  ---  openshift_protect_installed_version: True -version_install_base_package: False diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index f4e9ff43a..1c8b9046c 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -5,16 +5,6 @@      is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"      is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" -# This is only needed on masters and nodes; version_install_base_package -# should be set by a play externally. -- name: Install the base package for versioning -  package: -    name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" -    state: present -  when: -  - not is_containerized | bool -  - version_install_base_package | bool -  # Block attempts to install origin without specifying some kind of version information.  # This is because the latest tags for origin are usually alpha builds, which should not  # be used by default. Users must indicate what they want.  | 
