Diffstat (limited to 'roles')
56 files changed, 669 insertions, 344 deletions
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index b882a5597..7a1236392 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -1,7 +1,7 @@
 [Unit]
 Description=calico
-After=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
 
 [Service]
 Restart=always
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 8e4b66fbe..4506d2231 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,6 +1,6 @@
 [Unit]
 Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service
 
 [Service]
 ExecStart={{ bin_dir }}/aci_gw.sh start
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
index 566739f25..6e0a7a640 100644
--- a/roles/dns/templates/named.service.j2
+++ b/roles/dns/templates/named.service.j2
@@ -1,7 +1,7 @@
 [Unit]
-Requires=docker.service
-After=docker.service
-PartOf=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 Type=simple
@@ -12,4 +12,4 @@ ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /
 ExecStop=/usr/bin/docker stop bind
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index b0d0632b0..a461c479a 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -114,6 +114,15 @@
     dest: "{{ docker_systemd_dir }}/custom.conf"
     src: systemcontainercustom.conf.j2
 
+# Set local versions of facts that must be in json format for daemon.json
+# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+- set_fact:
+    l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+    l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
+    l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
+    l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+    l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
+
 # Configure container-engine using the daemon.json file
 - name: Configure Container Engine
   template:
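The `to_json` step above matters because Jinja renders a raw Ansible list with Python's repr, producing single quotes that are invalid in daemon.json. A minimal sketch of the difference (hypothetical playbook; the registry value is invented for illustration):

```yaml
# demo.yml -- why the l_docker_* facts pipe through to_json
- hosts: localhost
  gather_facts: false
  vars:
    docker_insecure_registries: ['registry.example.com:5000']
  tasks:
  - set_fact:
      l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
  - debug:
      # Renders with single quotes: ['registry.example.com:5000'] -- not JSON
      msg: "raw: {{ docker_insecure_registries }}"
  - debug:
      # Renders valid JSON: ["registry.example.com:5000"]
      msg: "json: {{ l_docker_insecure_registries }}"
```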
diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json
index 30a1b30f4..82edf27c0 100644
--- a/roles/docker/templates/daemon.json
+++ b/roles/docker/templates/daemon.json
@@ -12,7 +12,7 @@
     "default-gateway": "",
     "default-gateway-v6": "",
     "default-runtime": "oci",
-    "containerd": "/var/run/containerd.sock",
+    "containerd": "/run/containerd.sock",
     "default-ulimits": {},
     "disable-legacy-registry": false,
     "dns": [],
@@ -26,7 +26,7 @@
     "group": "",
     "hosts": [],
     "icc": false,
-    "insecure-registries": {{ docker_insecure_registries|default([]) }},
+    "insecure-registries": {{ l_docker_insecure_registries }},
     "ip": "0.0.0.0",
     "iptables": false,
     "ipv6": false,
@@ -36,9 +36,9 @@
     "live-restore": true,
 {% if docker_log_driver is defined  %}
     "log-driver": "{{ docker_log_driver }}",
-{% endif %}
+{%- endif %}
     "log-level": "",
-    "log-opts": {{ docker_log_options|default({}) }},
+    "log-opts": {{ l_docker_log_options }},
     "max-concurrent-downloads": 3,
     "max-concurrent-uploads": 5,
     "mtu": 0,
@@ -51,7 +51,7 @@
 	    "path": "/usr/libexec/docker/docker-runc-current"
 	}
     },
-    "selinux-enabled": {{ docker_selinux_enabled|default(true) }},
+    "selinux-enabled": {{ l_docker_selinux_enabled }},
     "storage-driver": "",
     "storage-opts": [],
     "tls": true,
@@ -60,7 +60,7 @@
     "tlskey": "",
     "tlsverify": true,
     "userns-remap": "",
-    "add-registry": {{  docker_additional_registries|default([]) }},
-    "blocked-registries": {{ docker_blocked_registries|default([]) }},
+    "add-registry": {{ l_docker_additional_registries }},
+    "block-registries": {{ l_docker_blocked_registries }},
     "userland-proxy-path": "/usr/libexec/docker/docker-proxy-current"
 }
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index c8ceaa6ba..adeca7a91 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -1,8 +1,8 @@
 [Unit]
 Description=The Etcd Server container
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 EnvironmentFile={{ etcd_conf_file }}
@@ -14,4 +14,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
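The recurring `docker.service` to `{{ openshift.docker.service_name }}` substitution lets the same unit templates follow whichever engine service the host actually runs. A hedged sketch of the knob involved (the resolved fact value is an assumption about openshift_facts behavior in system-container mode; verify against openshift_facts.py):

```yaml
# group_vars/OSEv3.yml -- hypothetical inventory fragment.
# With this set, openshift.docker.service_name is expected to resolve to
# "container-engine"; left unset, it stays "docker" and the rendered units
# keep their original After=docker.service dependencies.
openshift_docker_use_system_container: true
```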
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
index 131a01490..d1c3a6602 100644
--- a/roles/etcd_common/README.md
+++ b/roles/etcd_common/README.md
@@ -1,17 +1,21 @@
 etcd_common
 ========================
 
-TODO
+Common resources for dependent etcd roles. E.g. default variables for:
+* config directories
+* certificates
+* ports
+* other settings
 
-Requirements
-------------
-
-TODO
+Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g.
 
-Role Variables
---------------
+```yaml
+- delegated_serial_command:
+    command: /usr/bin/make_database.sh arg1 arg2
+    creates: /path/to/database
+```
 
-TODO
+Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example).
 
 Dependencies
 ------------
@@ -21,7 +25,22 @@ openshift-repos
 Example Playbook
 ----------------
 
-TODO
+**Drop etcdctl aliases**
+
+```yaml
+- include_role:
+    name: etcd_common
+    tasks_from: etcdctl
+```
+
+**Get access to common variables**
+
+```yaml
+# meta.yml of etcd
+...
+dependencies:
+- { role: etcd_common }
+```
 
 License
 -------
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index e12137b51..bdce28045 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1548,7 +1548,7 @@ class OCObject(OpenShiftCLI):
         if state == 'absent':
             # verify its not in our results
             if (params['name'] is not None or params['selector'] is not None) and \
-               (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0):
+               (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
                 return {'changed': False, 'state': state}
 
             if check_mode:
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 5a507348c..b89215510 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -80,6 +80,18 @@ options:
     required: false
     default: False
     aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: false
+    default: None
+    aliases: []
+  vol_name:
+    description:
+    - Name of the volume that is being queried.
+    required: false
+    default: None
+    aliases: []
   namespace:
     description:
     - The name of the namespace where the object lives
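The new `name`/`vol_name` parameters are exercised later in this change by the registry volume-swap task; a minimal standalone sketch of the same call shape (namespace and claim name are placeholders):

```yaml
# Sketch of an oc_volume call using the newly documented parameters.
- oc_volume:
    namespace: default
    name: docker-registry          # the DeploymentConfig being modified
    vol_name: registry-storage     # the volume entry inside that DC
    mount_type: pvc
    claim_name: example-glusterfs-claim
```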
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 89ee2f5a0..6f0da3d5c 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -117,7 +117,7 @@ class OCObject(OpenShiftCLI):
         if state == 'absent':
             # verify its not in our results
             if (params['name'] is not None or params['selector'] is not None) and \
-               (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0):
+               (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
                 return {'changed': False, 'state': state}
 
             if check_mode:
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
index 1d04afeef..43ff78c9f 100644
--- a/roles/lib_openshift/src/doc/volume
+++ b/roles/lib_openshift/src/doc/volume
@@ -29,6 +29,18 @@ options:
     required: false
     default: False
     aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: false
+    default: None
+    aliases: []
+  vol_name:
+    description:
+    - Name of the volume that is being queried.
+    required: false
+    default: None
+    aliases: []
   namespace:
     description:
     - The name of the namespace where the object lives
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
index 6990a11a8..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Custom filters for use in testing
 '''
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 577a14b9a..a2bc9ecdb 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 """
 Custom filters for use in openshift-ansible
 """
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 4ed3e1f01..57ac16602 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -1,8 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 # pylint: disable=missing-docstring,invalid-name
-#
 
 import random
 import tempfile
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 5ea902e2b..914e46c05 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,7 +1,6 @@
 #!/usr/bin/python
 # pylint: disable=too-many-lines
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 # Reason: Disable pylint too-many-lines because we don't want to split up this file.
 # Status: Permanently disabled to keep this module as self-contained as possible.
@@ -2168,7 +2167,9 @@ class OpenShiftFacts(object):
                         glusterfs=dict(
                             endpoints='glusterfs-registry-endpoints',
                             path='glusterfs-registry-volume',
-                            readOnly=False),
+                            readOnly=False,
+                            swap=False,
+                            swapcopy=True),
                         host=None,
                         access=dict(
                             modes=['ReadWriteMany']
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 208e81048..7bce7f107 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,4 +1,3 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Ansible callback plugin.
 '''
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index a46589443..4460ec324 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,5 +1,4 @@
 #!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Ansible module for yum-based systems determining if multiple releases
 of an OpenShift package are available, and if the release requested
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 630ebc848..433795b67 100755
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
@@ -1,5 +1,4 @@
 #!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Ansible module to test whether a yum update or install will succeed,
 without actually performing it or running yum.
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 6d576df71..3e5d7f860 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -28,6 +28,14 @@ From this role:
 | openshift_hosted_registry_selector    | region=infra                             | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
 | openshift_hosted_registry_cert_expire_days | `730` (2 years)                     | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later.                             |
 
+If you specify `openshift_hosted_registry_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name                                         | Default value | Description                                                                  |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap     | False         | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True          | If swapping, also copy the current contents of the registry volume           |
+
 Dependencies
 ------------
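A hedged sketch of how these switches might be set for a migration run, using the variable names from the README table above (group_vars placement is an assumption; `swapcopy` already defaults to True and is shown only for clarity):

```yaml
# group_vars/OSEv3.yml -- hypothetical fragment enabling the registry
# volume swap described above.
openshift_hosted_registry_kind: glusterfs
openshift_hosted_registry_glusterfs_swap: True
openshift_hosted_registry_glusterfs_swapcopy: True
```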
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 6e691c26f..751489958 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,7 +61,7 @@
     name: "{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
 
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
   oc_adm_policy_user:
     user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -126,4 +126,4 @@
 
 - include: storage/glusterfs.yml
   when:
-  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
+  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index b18b24266..e6bb196b8 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -1,10 +1,18 @@
 ---
+- name: Get registry DeploymentConfig
+  oc_obj:
+    namespace: "{{ openshift_hosted_registry_namespace }}"
+    state: list
+    kind: dc
+    name: "{{ openshift_hosted_registry_name }}"
+  register: registry_dc
+
 - name: Wait for registry pods
   oc_obj:
     namespace: "{{ openshift_hosted_registry_namespace }}"
     state: list
     kind: pod
-    selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+    selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
   register: registry_pods
   until:
   - "registry_pods.results.results[0]['items'] | count > 0"
@@ -38,6 +46,39 @@
     mode: "2775"
     recurse: True
 
+- block:
+  - name: Activate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+  - name: Get first registry pod name
+    set_fact:
+      registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+  - name: Copy current registry contents to new GlusterFS volume
+    command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+    when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+  - name: Swap new GlusterFS registry volume
+    oc_volume:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      vol_name: registry-storage
+      mount_type: pvc
+      claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+  - name: Deactivate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      state: absent
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+  when: openshift.hosted.registry.storage.glusterfs.swap
+
 - name: Unmount registry volume
   mount:
     state: unmounted
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 5385df3b7..72182fcdd 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -1,7 +1,7 @@
 [Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
@@ -14,4 +14,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index c05cc5f98..76dfe518e 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -72,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
 openshift_logging_fluentd_cpu_limit: 100m
 openshift_logging_fluentd_memory_limit: 512Mi
 openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}"
 openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
 openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
 openshift_logging_fluentd_hosts: ['--all']
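The new default above walks a precedence chain: an explicit `openshift_hosted_logging_use_journal` wins, then `docker_log_driver`, then `openshift.docker.log_driver`, then a search of `openshift.docker.options`, and finally the value is omitted. When in doubt it can simply be pinned; a minimal sketch (group_vars placement is an assumption):

```yaml
# group_vars/OSEv3.yml -- hypothetical fragment. Pinning the hosted-logging
# variable short-circuits the whole fallback chain above:
openshift_hosted_logging_use_journal: True
# Equivalently, setting the Docker log driver lets the second branch of the
# chain derive the same answer:
docker_log_driver: journald
```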
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index d13691259..5c93d823e 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,9 @@ spec:
         - name: dockercfg
           mountPath: /etc/sysconfig/docker
           readOnly: true
+        - name: dockerdaemoncfg
+          mountPath: /etc/docker
+          readOnly: true
 {% if openshift_logging_use_mux_client | bool %}
         - name: muxcerts
           mountPath: /etc/fluent/muxkeys
@@ -154,6 +157,9 @@ spec:
       - name: dockercfg
         hostPath:
           path: /etc/sysconfig/docker
+      - name: dockerdaemoncfg
+        hostPath:
+          path: /etc/docker
 {% if openshift_logging_use_mux_client | bool %}
       - name: muxcerts
         secret:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 58fabddeb..cfa860edf 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -32,6 +32,15 @@
   - not openshift.common.is_master_system_container | bool
   register: create_master_unit_file
 
+- name: Install Master service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+    src: "{{ openshift.common.service_type }}-master.service"
+  register: create_master_unit_file
+  when:
+  - not openshift.common.is_containerized | bool
+  - (openshift.master.ha is not defined or not openshift.master.ha) | bool
+
 - command: systemctl daemon-reload
   when: create_master_unit_file | changed
diff --git a/roles/openshift_master/templates/atomic-openshift-master.service b/roles/openshift_master/templates/atomic-openshift-master.service
new file mode 100644
index 000000000..02af4dd16
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Atomic OpenShift Master
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=atomic-openshift-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=atomic-openshift-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 155abd970..897ee7285 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin
 After=etcd_container.service
 Wants=etcd_container.service
 Before={{ openshift.common.service_type }}-node.service
-After=docker.service
-PartOf=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
 
 [Service]
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
@@ -23,5 +23,5 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
 WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 088e8db43..451f3436a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers
 Documentation=https://github.com/openshift/origin
 Wants={{ openshift.common.service_type }}-master-api.service
 After={{ openshift.common.service_type }}-master-api.service
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
@@ -22,4 +22,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 13381cd1a..7f40cb042 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -1,7 +1,7 @@
 [Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 After=etcd_container.service
 Wants=etcd_container.service
 
@@ -15,4 +15,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/origin-master.service b/roles/openshift_master/templates/origin-master.service
new file mode 100644
index 000000000..cf79dda02
--- /dev/null
+++ b/roles/openshift_master/templates/origin-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Origin Master Service
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=origin-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=origin-node.service
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 33a0af07f..2617efaf1 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -64,7 +64,7 @@
     --signer-key={{ openshift_ca_key }}
     --signer-serial={{ openshift_ca_serial }}
     --overwrite=false
-  when: inventory_hostname != openshift_ca_host
+  when: item != openshift_ca_host
   with_items: "{{ hostvars
                   | oo_select_keys(groups['oo_masters_to_config'])
                   | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
@@ -95,7 +95,7 @@
   with_items: "{{ hostvars
                   | oo_select_keys(groups['oo_masters_to_config'])
                   | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
-  when: item != openshift_ca_host
   delegate_to: "{{ openshift_ca_host }}"
   run_once: true
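The `inventory_hostname` to `item` change above fixes a delegated-loop bug: these tasks loop over every master while delegated to the CA host, so the per-master value being iterated is `item`, whereas `inventory_hostname` names whichever host the play is currently running for and is therefore the wrong thing to compare. A hedged sketch of the pattern (group and host names invented):

```yaml
# Hypothetical repro of the guard: a delegated, run-once loop over masters.
- command: "echo would sign certs for {{ item }}"
  with_items: "{{ groups['masters'] }}"  # e.g. [master1, master2, ca-host]
  # Correct: skips only the CA host's own entry. Testing inventory_hostname
  # here would compare the current play host instead of the loop item.
  when: item != openshift_ca_host
  delegate_to: "{{ openshift_ca_host }}"
  run_once: true
```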
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e570392ff..65f85066e 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Custom filters for use in openshift-master
 '''
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 52482d09b..a0fbf7dfc 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -25,6 +25,13 @@
   - openshift.common.is_containerized | bool
   - not openshift.common.is_node_system_container | bool
 
+- name: Install Node service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: "{{ openshift.common.service_type }}-node.service"
+  register: install_node_result
+  when: not openshift.common.is_containerized | bool
+
 - name: Create the openvswitch service env file
   template:
     src: openvswitch.sysconfig.j2
@@ -115,6 +122,5 @@
 
 - name: Reload systemd units
   command: systemctl daemon-reload
-  when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed
   notify:
   - restart node
diff --git a/roles/openshift_node/templates/atomic-openshift-node.service b/roles/openshift_node/templates/atomic-openshift-node.service
new file mode 100644
index 000000000..80232094a
--- /dev/null
+++ b/roles/openshift_node/templates/atomic-openshift-node.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Node
+After={{ openshift.docker.service_name }}.service
+After=openvswitch.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
 [Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
 PartOf={{ openshift.common.service_type }}-node.service
 Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index c42bdb7c3..06782cb8b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
 [Unit]
 After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
 After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
 {% if openshift.common.use_openshift_sdn %}
 Requires=openvswitch.service
 After=ovsdb-server.service
@@ -25,4 +25,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
 [Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/origin-node.service b/roles/openshift_node/templates/origin-node.service
new file mode 100644
index 000000000..8047301e6
--- /dev/null
+++ b/roles/openshift_node/templates/origin-node.service
@@ -0,0 +1,21 @@
+[Unit]
+Description=Origin Node
+After={{ openshift.docker.service_name }}.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
 [Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
 PartOf={{ openshift.common.service_type }}-node.service
 Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 0ff398152..a9b393652 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
 [Unit]
 After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
 After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
 {% if openshift.common.use_openshift_sdn %}
 Requires=openvswitch.service
 {% endif %}
@@ -23,4 +23,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
 [Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
 
 [Service]
 EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
 RestartSec=5s
 
 [Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index cf0fb94c9..7b310dbf8 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -8,10 +8,24 @@ Requirements
 
 * Ansible 2.2
 
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
 Role Variables
 --------------
 
-From this role:
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
 
 | Name                                             | Default value           |                                         |
 |--------------------------------------------------|-------------------------|-----------------------------------------|
@@ -31,6 +45,25 @@ From this role:
 | openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode
 | openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name                                              | Default value         |                                         |
+|---------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace    | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+
+Additionally, this role's behavior responds to the following registry-specific
+variable:
+
+| Name                                         | Default value | Description                                                                  |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap     | False         | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+
 Dependencies
 ------------
 
@@ -47,6 +80,7 @@ Example Playbook
   hosts: oo_first_master
   roles:
   - role: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0
 ```
 
 License
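A hedged sketch of an inventory wiring the `[glusterfs]` group together (YAML inventory format; host names and device paths are invented, and `glusterfs_devices` is the per-host variable consumed by glusterfs_deploy.yml below, which asserts at least three nodes):

```yaml
# Hypothetical inventory fragment; a parallel glusterfs_registry group
# (also three or more nodes) may be added for a registry-only cluster.
glusterfs:
  hosts:
    node1.example.com:
      glusterfs_devices: ['/dev/vdb']
    node2.example.com:
      glusterfs_devices: ['/dev/vdb']
    node3.example.com:
      glusterfs_devices: ['/dev/vdb']
```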
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ade850747..ebe9ca30b 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,7 @@
 openshift_storage_glusterfs_timeout: 300
 openshift_storage_glusterfs_namespace: 'default'
 openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_wipe: False
@@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: ''
 openshift_storage_glusterfs_heketi_user_key: ''
 openshift_storage_glusterfs_heketi_topology_load: True
 openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
new file mode 100644
index 000000000..fa5fa2cb0
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -0,0 +1,166 @@
+---
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ glusterfs_namespace }}"
+  when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+  when: glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,dc,jobs,secret"
+    selector: "deploy-heketi"
+  - kind: "template,route,service,dc"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+  set_fact:
+    glusterfs_heketi_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+  set_fact:
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
+
+- name: Generate topology file
+  template:
+    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+    dest: "{{ mktemp.stdout }}/topology.json"
+  when:
+  - glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+  register: topology_load
+  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  when:
+  - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_is_missing
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
new file mode 100644
index 000000000..451990240
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -0,0 +1,22 @@
+---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+
+- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 2b35e5137..579112349 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,44 +1,44 @@
 ---
 - assert:
-    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    that: "glusterfs_nodeselector.keys() | count == 1"
     msg: Only one GlusterFS nodeselector key pair should be provided
 
 - assert:
-    that: "groups.oo_glusterfs_to_config | count >= 3"
+    that: "glusterfs_nodes | count >= 3"
     msg: There must be at least three GlusterFS nodes specified
 
 - name: Delete pre-existing GlusterFS resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "template,daemonset"
     name: glusterfs
     state: absent
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Unlabel any existing GlusterFS nodes
   oc_label:
     name: "{{ item }}"
     kind: node
     state: absent
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
   with_items: "{{ groups.all }}"
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Delete pre-existing GlusterFS config
   file:
     path: /var/lib/glusterd
     state: absent
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
-  when: openshift_storage_glusterfs_wipe
+  with_items: "{{ glusterfs_nodes | default([]) }}"
+  when: glusterfs_wipe
 
 - name: Get GlusterFS storage devices state
   command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
   register: devices_info
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   failed_when: False
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
   # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
 - name: Clear GlusterFS storage device contents
@@ -46,12 +46,12 @@
   delegate_to: "{{ item.item }}"
   with_items: "{{ devices_info.results }}"
   when:
-  - openshift_storage_glusterfs_wipe
+  - glusterfs_wipe
   - item.stdout_lines | count > 0
 
 - name: Add service accounts to privileged SCC
   oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
     resource_kind: scc
     resource_name: privileged
     state: present
@@ -64,8 +64,8 @@
     name: "{{ glusterfs_host }}"
     kind: node
     state: add
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   loop_control:
     loop_var: glusterfs_host
@@ -76,7 +76,7 @@
 
 - name: Create GlusterFS template
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: glusterfs
    state: present
@@ -85,16 +85,16 @@
 
 - name: Deploy GlusterFS pods
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "glusterfs"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+      IMAGE_NAME: "{{ glusterfs_image }}"
+      IMAGE_VERSION: "{{ glusterfs_version }}"
 
 - name: Wait for GlusterFS pods
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs-node=pod"
@@ -102,6 +102,6 @@
   until:
   - "glusterfs_pods.results.results[0]['items'] | count > 0"
   # There must be as many pods with 'Ready' staus  True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 6d02d2090..392f4b65b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -1,7 +1,30 @@
 ---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+
+- include: glusterfs_common.yml
+  when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+
 - name: Delete pre-existing GlusterFS registry resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "{{ item.kind }}"
     name: "{{ item.name | default(omit) }}"
     selector: "{{ item.selector | default(omit) }}"
@@ -23,7 +46,7 @@
 
 - name: Create GlusterFS registry endpoints
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: endpoints
     name: glusterfs-registry-endpoints
@@ -32,7 +55,7 @@
 
 - name: Create GlusterFS registry service
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: service
     name: glusterfs-registry-endpoints
@@ -40,9 +63,9 @@
     - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
 
 - name: Check if GlusterFS registry volume exists
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
   register: registry_volume
 
 - name: Create GlusterFS registry volume
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
- name: Copy heketi DB to GlusterFS volume -  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" +  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"    when: setup_storage.rc == 0  - name: Wait for copy job to finish    oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      kind: job      state: list      name: "heketi-storage-copy-job" @@ -28,7 +28,7 @@    # Pod's 'Complete' status must be True    - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"    delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  retries: "{{ (glusterfs_timeout / 10) | int }}"    failed_when:    - "'results' in heketi_job.results"    - "heketi_job.results.results | count > 0" @@ -38,7 +38,7 @@  - name: Delete deploy resources    oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      kind: "{{ item.kind }}"      name: "{{ item.name | default(omit) }}"      selector: "{{ item.selector | default(omit) }}" @@ -55,7 +55,7 @@  - name: Create heketi resources    oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      kind: template      name: heketi      state: present @@ -64,18 +64,18 @@  - name: Deploy heketi pod    oc_process: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      template_name: "heketi"      create: True      params: -      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" -      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" -      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" -      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" +      IMAGE_NAME: "{{ glusterfs_heketi_image }}" +      IMAGE_VERSION: "{{ glusterfs_heketi_version }}" +      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" +      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"  - name: Wait for heketi pod    oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      kind: pod      state: list      selector: "glusterfs=heketi-pod" @@ -85,11 +85,11 @@    # Pod's 'Ready' status must be True    - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"    delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  retries: "{{ (glusterfs_timeout / 10) | int }}"  - name: Determine heketi URL    oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" +    namespace: "{{ glusterfs_namespace }}"      state: list      kind: ep      selector: "glusterfs=heketi-service" @@ -98,12 +98,12 @@    - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"    - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"    delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" +  retries: "{{ (glusterfs_timeout / 10) | int }}"  - 
name: Set heketi URL    set_fact: -    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" +    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"  - name: Verify heketi service -  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" +  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"    changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml index 71c4a2732..ebd8db453 100644 --- a/roles/openshift_storage_glusterfs/tasks/main.yml +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -5,174 +5,14 @@    changed_when: False    check_mode: no -- name: Verify target namespace exists -  oc_project: -    state: present -    name: "{{ openshift_storage_glusterfs_namespace }}" -  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native - -- include: glusterfs_deploy.yml -  when: openshift_storage_glusterfs_is_native - -- name: Make sure heketi-client is installed -  package: name=heketi-client state=present - -- name: Delete pre-existing heketi resources -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    kind: "{{ item.kind }}" -    name: "{{ item.name | default(omit) }}" -    selector: "{{ item.selector | default(omit) }}" -    state: absent -  with_items: -  - kind: "template,route,service,jobs,dc,secret" -    selector: "deploy-heketi" -  - kind: "template,route,dc,service" -    name: "heketi" -  - kind: "svc,ep" -    name: "heketi-storage-endpoints" -  - kind: "sa" -    name: "heketi-service-account" -  failed_when: False -  when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for deploy-heketi pods to terminate -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    kind: pod -    state: list -    selector: "glusterfs=deploy-heketi-pod" -  register: heketi_pod -  until: "heketi_pod.results.results[0]['items'] | count == 0" -  delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" -  when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for heketi pods to terminate -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    kind: pod -    state: list -    selector: "glusterfs=heketi-pod" -  register: heketi_pod -  until: "heketi_pod.results.results[0]['items'] | count == 0" -  delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" -  when: openshift_storage_glusterfs_heketi_wipe - -- name: Create heketi service account -  oc_serviceaccount: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    name: heketi-service-account -    state: present -  when: openshift_storage_glusterfs_heketi_is_native - -- name: Add heketi service account to privileged SCC -  oc_adm_policy_user: -    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" -    resource_kind: scc -    resource_name: privileged -    state: present -  when: openshift_storage_glusterfs_heketi_is_native - -- name: Allow heketi service account to view/edit pods -  oc_adm_policy_user: -    user: 
"system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" -    resource_kind: role -    resource_name: edit -    state: present -  when: openshift_storage_glusterfs_heketi_is_native - -- name: Check for existing deploy-heketi pod -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    state: list -    kind: pod -    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" -  register: heketi_pod -  when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy deploy-heketi -  set_fact: -    openshift_storage_glusterfs_heketi_deploy_is_missing: False -  when: -  - "openshift_storage_glusterfs_heketi_is_native" -  - "heketi_pod.results.results[0]['items'] | count > 0" -  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True -  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- name: Check for existing heketi pod -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    state: list -    kind: pod -    selector: "glusterfs=heketi-pod" -  register: heketi_pod -  when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy heketi -  set_fact: -    openshift_storage_glusterfs_heketi_is_missing: False +- include: glusterfs_config.yml    when: -  - "openshift_storage_glusterfs_heketi_is_native" -  - "heketi_pod.results.results[0]['items'] | count > 0" -  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True -  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- include: heketi_deploy_part1.yml -  when: -  - openshift_storage_glusterfs_heketi_is_native -  - openshift_storage_glusterfs_heketi_deploy_is_missing -  - openshift_storage_glusterfs_heketi_is_missing - -- name: Determine heketi URL -  oc_obj: -    namespace: "{{ openshift_storage_glusterfs_namespace }}" -    state: list -    kind: ep -    selector: "glusterfs in (deploy-heketi-service, heketi-service)" -  register: heketi_url -  until: -  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" -  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" -  delay: 10 -  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" -  when: -  - openshift_storage_glusterfs_heketi_is_native -  - openshift_storage_glusterfs_heketi_url is undefined - -- name: Set heketi URL -  set_fact: -    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" -  when: -  - openshift_storage_glusterfs_heketi_is_native -  - openshift_storage_glusterfs_heketi_url is undefined - -- name: Verify heketi service -  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" -  changed_when: False - -- name: Generate topology file -  template: -    src: "{{ openshift.common.examples_content_version }}/topology.json.j2" -    dest: "{{ mktemp.stdout }}/topology.json" -  when: -  - openshift_storage_glusterfs_is_native -  - openshift_storage_glusterfs_heketi_topology_load - 
-- name: Load heketi topology -  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" -  register: topology_load -  failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout -  when: -  - openshift_storage_glusterfs_is_native -  - openshift_storage_glusterfs_heketi_topology_load - -- include: heketi_deploy_part2.yml -  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing +  - g_glusterfs_hosts | default([]) | count > 0  - include: glusterfs_registry.yml -  when: openshift.hosted.registry.storage.kind == 'glusterfs' +  when: +  - g_glusterfs_registry_hosts | default([]) | count > 0 +  - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"  - name: Delete temp directory    file: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index d72d085c9..605627ab5 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -4,7 +4,7 @@ metadata:    name: glusterfs-registry-endpoints  subsets:  - addresses: -{% for node in groups.oo_glusterfs_to_config %} +{% for node in glusterfs_nodes %}    - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}  {% endfor %}    ports: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 index eb5b4544f..33d8f9b36 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 @@ -1,7 +1,7 @@  {    "clusters": [  {%- set clusters = {} -%} -{%- for node in groups.oo_glusterfs_to_config -%} +{%- for node in glusterfs_nodes -%}    {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}    {%- if cluster in clusters -%}      {%- set _dummy = clusters[cluster].append(node) -%} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index fa9b20e92..d8b1158a6 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -86,8 +86,16 @@    include: set_version_rpm.yml    when: not is_containerized | bool -- name: Set openshift_version for containerized installation -  include: set_version_containerized.yml +- block: +  - name: Set openshift_version for containerized installation +    include: set_version_containerized.yml +  - name: Determine openshift rpm version +    include: rpm_version.yml +  - name: Fail if rpm version and docker image version are different +    fail: +      msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" +    # Both versions have the same string representation +    when: openshift_rpm_version != openshift_version    when: is_containerized | bool  # Warn if the user has provided an openshift_image_tag but is not doing a containerized install diff --git a/roles/openshift_version/tasks/rpm_version.yml b/roles/openshift_version/tasks/rpm_version.yml new file mode 100644 index 000000000..bd5e94b43 --- /dev/null +++ 
b/roles/openshift_version/tasks/rpm_version.yml @@ -0,0 +1,44 @@ +--- +# input_variables: +# - repoquery_cmd +# - openshift.common.service_type +# output_variables: +# - openshift_rpm_version + +# if {{ openshift.common.service_type }}-excluder is enabled, +# the repoquery for {{ openshift.common.service_type }} will not work. +# Thus, create a temporary yum.conf file where exclude= is set to an empty list +- name: Create temporary yum.conf file +  command: mktemp -d /tmp/yum.conf.XXXXXX +  register: yum_conf_temp_file_result + +- set_fact: +    yum_conf_temp_file: "{{ yum_conf_temp_file_result.stdout }}/yum.conf" + +- name: Copy yum.conf into the temporary file +  copy: +    src: /etc/yum.conf +    dest: "{{ yum_conf_temp_file }}" +    remote_src: True + +- name: Clear the exclude= list in the temporary yum.conf +  lineinfile: +    # since Ansible 2.3 the 'dest' option is named 'path' +    dest: "{{ yum_conf_temp_file }}" +    regexp: '^exclude=' +    line: 'exclude=' + +- name: Gather common package version +  command: > +    {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type }}" +  register: common_version +  failed_when: false +  changed_when: false + +- name: Delete the temporary yum.conf +  file: +    path: "{{ yum_conf_temp_file_result.stdout }}" +    state: absent + +- set_fact: +    openshift_rpm_version: "{{ common_version.stdout | default('0.0', True) }}" diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml index c7604af1a..3cf78068b 100644 --- a/roles/openshift_version/tasks/set_version_rpm.yml +++ b/roles/openshift_version/tasks/set_version_rpm.yml @@ -7,42 +7,8 @@    - openshift_pkg_version is defined    - openshift_version is not defined -# if {{ openshift.common.service_type}}-excluder is enabled, -# the repoquery for {{ openshift.common.service_type}} will not work. 
-# Thus, create a temporary yum,conf file where exclude= is set to an empty list -- name: Create temporary yum.conf file -  command: mktemp -d /tmp/yum.conf.XXXXXX -  register: yum_conf_temp_file_result - -- set_fact: -    yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf" - -- name: Copy yum.conf into the temporary file -  copy: -    src: /etc/yum.conf -    dest: "{{ yum_conf_temp_file }}" -    remote_src: True - -- name: Clear the exclude= list in the temporary yum.conf -  lineinfile: -    # since ansible 2.3 s/dest/path -    dest: "{{ yum_conf_temp_file }}" -    regexp: '^exclude=' -    line: 'exclude=' - -- name: Gather common package version -  command: > -    {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}" -  register: common_version -  failed_when: false -  changed_when: false -  when: openshift_version is not defined - -- name: Delete the temporary yum.conf -  file: -    path: "{{ yum_conf_temp_file_result.stdout }}" -    state: absent - -- set_fact: -    openshift_version: "{{ common_version.stdout | default('0.0', True) }}" +- block: +  - include: rpm_version.yml +  - set_fact: +      openshift_version: "{{ openshift_rpm_version }}"    when: openshift_version is not defined diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 8d4878fa7..aeee3ede8 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -1,6 +1,5 @@  #!/usr/bin/python  # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4  # pylint: disable=fixme, missing-docstring  import subprocess
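
The glusterfs_registry.yml change above re-exports each openshift_storage_glusterfs_registry_* variable under a generic glusterfs_* name before including glusterfs_common.yml, so the shared tasks reference only the generic names and both the config and registry code paths can reuse them. A minimal sketch of that indirection, with hypothetical file and variable names rather than the role's real ones:

# shared.yml -- consumes only the generic names (hypothetical file)
- debug:
    msg: "deploying into {{ glusterfs_namespace }}, timeout {{ glusterfs_timeout }}s"

# registry.yml -- maps its prefixed variables onto the generic names,
# then pulls in the shared tasks (include: is the pre-2.4 syntax used here)
- set_fact:
    glusterfs_namespace: "{{ my_registry_namespace }}"  # hypothetical prefixed variable
    glusterfs_timeout: "{{ my_registry_timeout }}"      # hypothetical prefixed variable

- include: shared.yml

Because set_fact registers host-level facts, the mapped values remain visible to every task in the included file.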

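The new rpm_version.yml works around the *-excluder packages: when the excluder is enabled, /etc/yum.conf carries an exclude= list that hides the OpenShift packages from repoquery, so the tasks copy yum.conf aside, blank out exclude=, and run repoquery against the copy. A rough condensed equivalent of the same trick, assuming repoquery from yum-utils is available and substituting the literal package name origin for {{ openshift.common.service_type }}:

- name: Query package version with the exclude list emptied (sketch)
  shell: |
    set -e
    tmpdir=$(mktemp -d /tmp/yum.conf.XXXXXX)
    # blank the exclude= list so the excluder cannot mask the package
    sed 's/^exclude=.*/exclude=/' /etc/yum.conf > "$tmpdir/yum.conf"
    repoquery --config "$tmpdir/yum.conf" --qf '%{version}' origin
    rm -rf "$tmpdir"
  register: origin_rpm_version
  changed_when: false

The version string this yields is what the new block in openshift_version/tasks/main.yml compares against openshift_version to catch rpm/image mismatches on containerized installs.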