Diffstat (limited to 'roles')
51 files changed, 956 insertions, 386 deletions
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 0b3a2a69d..ce2ae8365 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
 ---
 __ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: latest
+__ansible_service_broker_image_tag: v3.6
 
 __ansible_service_broker_etcd_image_prefix: rhel7/
 __ansible_service_broker_etcd_image_tag: latest
diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml
index cbe5bb92b..15c40e3b5 100644
--- a/roles/cockpit/defaults/main.yml
+++ b/roles/cockpit/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-r_cockpit_firewall_enabled: True
-r_cockpit_use_firewalld: False
+r_cockpit_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_cockpit_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 r_cockpit_os_firewall_deny: []
 r_cockpit_os_firewall_allow:
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index a7803c0ee..919e3aa7b 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -10,5 +10,8 @@ nuage_ca_serial: "{{ nuage_ca_dir }}/nuageMonCA.serial.txt"
 nuage_master_mon_dir: /usr/share/nuage-openshift-monitor
 nuage_node_plugin_dir: /usr/share/vsp-openshift
 
+nuage_node_cni_bin_dir: /opt/cni/bin
+nuage_node_cni_netconf_dir: /etc/cni/net.d
+
 nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
 nuage_mon_cert_validity_period: "{{ nuage_cert_validity_period | default('3650') }}"
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
new file mode 100644
index 000000000..6c8c9f8d2
--- /dev/null
+++ b/roles/nuage_common/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Set the Nuage plugin openshift directory fact to handle Atomic host install
+  set_fact:
+    nuage_node_plugin_dir: /var/usr/share/vsp-openshift
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage CNI network config directory fact to handle Atomic host install
+  set_fact:
+    nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage CNI binary directory fact to handle Atomic host install
+  set_fact:
+    nuage_node_cni_bin_dir: /var/opt/cni/bin/
+  when: openshift.common.is_atomic | bool
+
+- name: Assure CNI plugin config dir exists before daemon set install
+  become: yes
+  file: path="{{ nuage_node_plugin_dir }}" state=directory
+
+- name: Assure CNI netconf directory exists before daemon set install
+  become: yes
+  file: path="{{ nuage_node_cni_netconf_dir }}" state=directory
+
+- name: Assure CNI plugin binary directory exists before daemon set install
+  become: yes
+  file: path="{{ nuage_node_cni_bin_dir }}" state=directory
diff --git a/roles/nuage_master/defaults/main.yml b/roles/nuage_master/defaults/main.yml
index ffab25775..5f1d8686a 100644
--- a/roles/nuage_master/defaults/main.yml
+++ b/roles/nuage_master/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-r_nuage_master_firewall_enabled: True
-r_nuage_master_use_firewalld: False
+r_nuage_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_nuage_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 nuage_mon_rest_server_port: '9443'
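The cockpit and nuage role defaults above (and the nuage_node defaults further down) all switch from hard-coded booleans to lookups of the global os_firewall_enabled / os_firewall_use_firewalld variables, so a single inventory-level setting now drives the firewall behavior of every role consistently. A minimal sketch of setting that global toggle, assuming a group_vars file (the path is illustrative, not part of this change):

    # group_vars/all.yml (illustrative location)
    os_firewall_enabled: true
    os_firewall_use_firewalld: false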
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index ad7bbb111..21da6b953 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,8 +1,4 @@
 ---
-- name: restart nuage-openshift-monitor
-  become: yes
-  systemd: name=nuage-openshift-monitor state=restarted
-
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when: >
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index d0363c981..f3c487132 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -3,17 +3,64 @@
   include: firewall.yml
   static: yes
 
+- name: Set the Nuage certificate directory fact for Atomic hosts
+  set_fact:
+    cert_output_dir: /var/usr/share/nuage-openshift-monitor
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage kubeconfig file path fact for Atomic hosts
+  set_fact:
+    kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage monitor yaml location fact for Atomic hosts
+  set_fact:
+    kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage monitor certs location fact for Atomic hosts
+  set_fact:
+    nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
+  when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage master config directory for daemon sets install
+  set_fact:
+    nuage_master_config_dsets_mount_dir: /var/usr/share/
+  when: master_host_type == "is_atomic"
+
+- name: Set the Nuage node config directory for daemon sets install
+  set_fact:
+    nuage_node_config_dsets_mount_dir: /var/usr/share/
+  when: slave_host_type == "is_atomic"
+
+- name: Set the Nuage CNI plugin binary directory for daemon sets install
+  set_fact:
+    nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
+  when: openshift.common.is_atomic | bool
+
 - name: Create directory /usr/share/nuage-openshift-monitor
   become: yes
   file: path=/usr/share/nuage-openshift-monitor state=directory
+  when: not openshift.common.is_atomic | bool
 
-- name: Create the log directory
+- name: Create directory /var/usr/share/nuage-openshift-monitor
   become: yes
-  file: path={{ nuage_mon_rest_server_logdir }} state=directory
+  file: path=/var/usr/share/nuage-openshift-monitor state=directory
+  when: openshift.common.is_atomic | bool
+
+- name: Create directory /var/usr/bin for monitor binary on atomic
+  become: yes
+  file: path=/var/usr/bin state=directory
+  when: openshift.common.is_atomic | bool
 
-- name: Install Nuage Openshift Monitor
+- name: Create CNI bin directory /var/opt/cni/bin
   become: yes
-  yum: name={{ nuage_openshift_rpm }} state=present
+  file: path=/var/opt/cni/bin state=directory
+  when: openshift.common.is_atomic | bool
+
+- name: Create the log directory
+  become: yes
+  file: path={{ nuage_mon_rest_server_logdir }} state=directory
 
 - include: serviceaccount.yml
 
@@ -45,10 +92,32 @@
   become: yes
   copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}"
 
-- name: Create nuage-openshift-monitor.yaml
+- name: Create Nuage master daemon set yaml file
+  become: yes
+  template: src=nuage-master-config-daemonset.j2 dest=/etc/nuage-master-config-daemonset.yaml owner=root mode=0644
+
+- name: Create Nuage node daemon set yaml file
   become: yes
-  template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
+  template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644
+
+- name: Add the service account to the privileged scc to have root permissions
+  shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller
+  ignore_errors: true
+  when: inventory_hostname == groups.oo_first_master.0
+
+- name: Spawn Nuage Master monitor daemon sets pod
+  shell: oc create -f /etc/nuage-master-config-daemonset.yaml
+  ignore_errors: true
+  when: inventory_hostname == groups.oo_first_master.0
+
+- name: Spawn Nuage CNI daemon sets pod
+  shell: oc create -f /etc/nuage-node-config-daemonset.yaml
+  ignore_errors: true
+  when: inventory_hostname == groups.oo_first_master.0
+
+- name: Restart daemons
+  command: /bin/true
   notify:
     - restart master api
     - restart master controllers
-    - restart nuage-openshift-monitor
+  ignore_errors: true
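The new "Restart daemons" task relies on a common Ansible idiom: a no-op command (/bin/true) always reports "changed", so its notify list reliably queues the restart handlers for the end of the play. A minimal self-contained sketch of the pattern (service and handler names are illustrative):

    - hosts: masters
      tasks:
        - name: Trigger restart handlers unconditionally
          command: /bin/true
          notify:
            - restart my-service
      handlers:
        - name: restart my-service
          systemd: name=my-service state=restarted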
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
new file mode 100755
index 000000000..612d689c2
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -0,0 +1,111 @@
+# This ConfigMap is used to configure Nuage VSP configuration on master nodes
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nuage-master-config
+  namespace: kube-system
+data:
+  # This will generate the required Nuage configuration
+  # on master nodes
+  monitor_yaml_config: |
+
+      # .kubeconfig that includes the nuage service account
+      kubeConfig: {{ nuage_master_crt_dir }}/nuage.kubeconfig
+      # name of the nuage service account, or another account with 'cluster-reader'
+      # permissions
+      # Openshift master config file
+      masterConfig: /etc/origin/master/master-config.yaml
+      # URL of the VSD Architect
+      vsdApiUrl: {{ vsd_api_url }}
+      # API version to query against.  Usually "v3_2"
+      vspVersion: {{ vsp_version }}
+      # Name of the enterprise in which pods will reside
+      enterpriseName: {{ enterprise }}
+      # Name of the domain in which pods will reside
+      domainName: {{ domain }}
+      # VSD generated user certificate file location on master node
+      userCertificateFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}.pem
+      # VSD generated user key file location on master node
+      userKeyFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}-Key.pem
+      # Location where logs should be saved
+      log_dir: /var/log/nuage-openshift-monitor
+      # Monitor rest server parameters
+      # Logging level for the nuage openshift monitor
+      # allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL
+      logLevel: 0
+      # Parameters related to the nuage monitor REST server
+      nuageMonServer:
+          URL: 0.0.0.0:9443
+          certificateDirectory: {{ nuage_master_crt_dir }}
+      # etcd config required for HA
+      etcdClientConfig:
+          ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt
+          certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt
+          keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key
+          urls:
+      {% for etcd_url in openshift.master.etcd_urls %}
+              - {{ etcd_url }}
+      {% endfor %}
+
+---
+
+# This manifest installs Nuage master node configuration on
+# each Nuage master node in a cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: nuage-master-config
+  namespace: kube-system
+  labels:
+    k8s-app: nuage-master-config
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nuage-master-config
+  template:
+    metadata:
+      labels:
+        k8s-app: nuage-master-config
+    spec:
+      hostNetwork: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+          operator: Exists
+      nodeSelector:
+        install-monitor: "true"
+      containers:
+        # This container configures Nuage Master node
+        - name: install-nuage-master-config
+          image: nuage/master:{{ nuage_monitor_container_image_version }}
+          ports:
+            - containerPort: 9443
+              hostPort: 9443
+          command: ["/configure-master.sh"]
+          args: ["ose", "{{ master_host_type }}"]
+          securityContext:
+            privileged: true
+          env:
+            # nuage-openshift-monitor.yaml config to install on each slave node.
+            - name: NUAGE_MASTER_VSP_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: nuage-master-config
+                  key: monitor_yaml_config
+          volumeMounts:
+            - mountPath: /var/log
+              name: cni-log-dir
+            - mountPath: {{ nuage_master_config_dsets_mount_dir }}
+              name: usr-share-dir
+            - mountPath: /etc/origin/
+              name: master-config-dir
+      volumes:
+        - name: cni-log-dir
+          hostPath:
+            path: /var/log
+        - name: usr-share-dir
+          hostPath:
+            path: {{ nuage_master_config_dsets_mount_dir }}
+        - name: master-config-dir
+          hostPath:
+            path: /etc/origin/
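Note that the nuage-master-config DaemonSet above selects nodes by the install-monitor=true label (in addition to tolerating the master taint), so that label presumably has to be present on the intended masters before the pod can be scheduled. A hedged sketch of applying it from Ansible (the group name and use of inventory_hostname as the node name are assumptions):

    - name: Label masters so the nuage-master-config DaemonSet can schedule onto them
      shell: oc label node {{ inventory_hostname }} install-monitor=true --overwrite
      when: inventory_hostname in groups.masters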
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
new file mode 100755
index 000000000..02e9a1563
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -0,0 +1,206 @@
+# This ConfigMap is used to configure Nuage VSP configuration
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nuage-config
+  namespace: kube-system
+data:
+  # This will generate the required Nuage vsp-openshift.yaml
+  # config on each slave node
+  plugin_yaml_config: |
+      clientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.crt
+      # The key to the certificate in clientCert above
+      clientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.key
+      # The certificate authority's certificate for the local kubelet.  Usually the
+      # same as the CA cert used to create the client Cert/Key pair.
+      CACert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/ca.crt
+      # Name of the enterprise in which pods will reside
+      enterpriseName: {{ enterprise }}
+      # Name of the domain in which pods will reside
+      domainName: {{ domain }}
+      # Name of the VSD user in admin group
+      vsdUser: {{ vsd_user }}
+      # IP address and port number of master API server
+      masterApiServer: {{ api_server_url }}
+      # REST server URL
+      nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+      # Bridge name for the docker bridge
+      dockerBridgeName: docker0
+      # Certificate for connecting to the openshift monitor REST api
+      nuageMonClientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.crt
+      # Key to the certificate in restClientCert
+      nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
+      # CA certificate for verifying the master's rest server
+      nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
+      # Nuage vport mtu size
+      interfaceMTU: {{ nuage_vport_mtu  }}
+      # Logging level for the plugin
+      # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
+      logLevel: 3
+
+  # This will generate the required Nuage CNI yaml configuration
+  cni_yaml_config: |
+      vrsendpoint: "/var/run/openvswitch/db.sock"
+      vrsbridge: "alubr0"
+      monitorinterval: 60
+      cniversion: 0.2.0
+      loglevel: "info"
+      portresolvetimer: 60
+      logfilesize: 1
+      vrsconnectionchecktimer: 180
+      mtu: 1450
+      staleentrytimeout: 600
+
+---
+
+# This manifest installs Nuage CNI plugins and network config on
+# each worker node in Openshift cluster
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: nuage-cni-ds
+  namespace: kube-system
+  labels:
+    k8s-app: nuage-cni-ds
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nuage-cni-ds
+  template:
+    metadata:
+      labels:
+        k8s-app: nuage-cni-ds
+    spec:
+      hostNetwork: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+          operator: Exists
+      containers:
+        # This container installs Nuage CNI binaries
+        # and CNI network config file on each node.
+        - name: install-nuage-cni
+          image: nuage/cni:{{ nuage_cni_container_image_version }}
+          command: ["/install-cni.sh"]
+          args: ["nuage-cni-openshift", "{{ slave_host_type }}"]
+          securityContext:
+            privileged: true
+          env:
+            # Nuage vsp-openshift.yaml config to install on each slave node.
+            - name: NUAGE_VSP_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: nuage-config
+                  key: plugin_yaml_config
+            # Nuage nuage-cni.yaml config to install on each slave node.
+            - name: NUAGE_CNI_YAML_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: nuage-config
+                  key: cni_yaml_config
+            # Nuage cluster network CIDR for iptables configuration
+            - name: NUAGE_CLUSTER_NW_CIDR
+              value: "{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+          volumeMounts:
+            - mountPath: /host/opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+            - mountPath: /etc/default
+              name: cni-yaml-dir
+            - mountPath: /var/run
+              name: var-run-dir
+            - mountPath: /var/log
+              name: cni-log-dir
+            - mountPath: {{ nuage_node_config_dsets_mount_dir }}
+              name: usr-share-dir
+      volumes:
+        - name: cni-bin-dir
+          hostPath:
+            path: {{ nuage_cni_bin_dsets_mount_dir }}
+        - name: cni-net-dir
+          hostPath:
+            path: {{ nuage_cni_netconf_dsets_mount_dir }}
+        - name: cni-yaml-dir
+          hostPath:
+            path: /etc/default
+        - name: var-run-dir
+          hostPath:
+            path: /var/run
+        - name: cni-log-dir
+          hostPath:
+            path: /var/log
+        - name: usr-share-dir
+          hostPath:
+            path: {{ nuage_node_config_dsets_mount_dir }}
+
+---
+
+# This manifest installs Nuage VRS on
+# each worker node in an Openshift cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: nuage-vrs-ds
+  namespace: kube-system
+  labels:
+    k8s-app: nuage-vrs-ds
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nuage-vrs-ds
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: nuage-vrs-ds
+    spec:
+      hostNetwork: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+          operator: Exists
+      containers:
+        # This container installs Nuage VRS running as a
+        # container on each worker node
+        - name: install-nuage-vrs
+          image: nuage/vrs:{{ nuage_vrs_container_image_version }}
+          securityContext:
+            privileged: true
+          env:
+            # Configure parameters for VRS openvswitch file
+            - name: NUAGE_ACTIVE_CONTROLLER
+              value: "{{ vsc_active_ip }}"
+            - name: NUAGE_STANDBY_CONTROLLER
+              value: "{{ vsc_standby_ip }}"
+            - name: NUAGE_PLATFORM
+              value: '"kvm, k8s"'
+            - name: NUAGE_K8S_SERVICE_IPV4_SUBNET
+              value: '192.168.0.0\/16'
+            - name: NUAGE_NETWORK_UPLINK_INTF
+              value: "eth0"
+          volumeMounts:
+            - mountPath: /var/run
+              name: vrs-run-dir
+            - mountPath: /var/log
+              name: vrs-log-dir
+            - mountPath: /sys/module
+              name: sys-mod-dir
+              readOnly: true
+            - mountPath: /lib/modules
+              name: lib-mod-dir
+              readOnly: true
+      volumes:
+        - name: vrs-run-dir
+          hostPath:
+            path: /var/run
+        - name: vrs-log-dir
+          hostPath:
+            path: /var/log
+        - name: sys-mod-dir
+          hostPath:
+            path: /sys/module
+        - name: lib-mod-dir
+          hostPath:
+            path: /lib/modules
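Because all three DaemonSets (nuage-master-config, nuage-cni-ds, nuage-vrs-ds) are created via tasks that set ignore_errors: true, a failed oc create will not abort the play, so a post-install verification step can be useful. A purely illustrative sketch, not part of this change:

    - name: Verify the Nuage daemon sets exist in kube-system
      shell: oc get daemonsets -n kube-system
      register: nuage_ds_status
      changed_when: false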
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
deleted file mode 100644
index e077128a4..000000000
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-# .kubeconfig that includes the nuage service account
-kubeConfig: {{ kube_config }}
-# name of the nuage service account, or another account with 'cluster-reader'
-# permissions
-# Openshift master config file
-masterConfig: {{ master_config_yaml }}
-# URL of the VSD Architect
-vsdApiUrl: {{ vsd_api_url }}
-# API version to query against.  Usually "v3_2"
-vspVersion: {{ vsp_version }}
-# File containing a VSP license to install.  Only necessary if no license has
-# been installed on the VSD Architect before, only valid for standalone vsd install
-# licenseFile: "/path/to/base_vsp_license.txt"
-# Name of the enterprise in which pods will reside
-enterpriseName: {{ enterprise }}
-# Name of the domain in which pods will reside
-domainName: {{ domain }}
-# VSD generated user certificate file location on master node
-userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}
-# VSD generated user key file location on master node
-userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }}
-# Location where logs should be saved
-log_dir: {{ nuage_mon_rest_server_logdir }}
-# Monitor rest server parameters
-# Logging level for the nuage openshift monitor
-# allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL
-logLevel: {{ nuage_mon_log_level }}
-# Parameters related to the nuage monitor REST server
-nuageMonServer:
-    URL: {{ nuage_mon_rest_server_url }}
-    certificateDirectory: {{ cert_output_dir }}
-# etcd config required for HA
-etcdClientConfig:
-    ca: {{ openshift_master_config_dir }}/{{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
-    certFile: {{ openshift_master_config_dir }}/master.etcd-client.crt
-    keyFile: {{ openshift_master_config_dir }}/master.etcd-client.key
-    urls:
-{% for etcd_url in openshift.master.etcd_urls %}
-        - {{ etcd_url }}
-{% endfor %}
-
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 57d5d2595..114514d7c 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -22,6 +22,18 @@ nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(open
 nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
 nuage_service_account: system:serviceaccount:default:nuage
 
+nuage_master_config_dsets_mount_dir: /usr/share/
+nuage_node_config_dsets_mount_dir: /usr/share/
+nuage_cni_bin_dsets_mount_dir: /opt/cni/bin
+nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d
+nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}"
+nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}"
+nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}"
+api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"
+master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}"
+slave_host_type: "{{ slave_base_host_type | default('is_rhel_server') }}"
+
 nuage_tasks:
 - resource_kind: cluster-role
   resource_name: cluster-reader
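The vars added above wire up user-facing override knobs: each *_container_image_version resolves from a shorter inventory variable with a v5.1.1 default, and the host types fall back to is_rhel_server. A sketch of overriding them from the inventory, assuming a group_vars file (location and values are illustrative):

    # group_vars/OSEv3.yml (illustrative location)
    nuage_monitor_image_version: v5.1.1
    nuage_vrs_image_version: v5.1.1
    nuage_cni_image_version: v5.1.1
    nuage_interface_mtu: '1460'
    master_base_host_type: is_atomic
    slave_base_host_type: is_atomic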
diff --git a/roles/nuage_node/defaults/main.yml b/roles/nuage_node/defaults/main.yml
index b3d2e3cec..9a2e34387 100644
--- a/roles/nuage_node/defaults/main.yml
+++ b/roles/nuage_node/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-r_nuage_node_firewall_enabled: True
-r_nuage_node_use_firewalld: False
+r_nuage_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_nuage_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 nuage_mon_rest_server_port: '9443'
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 8384856ff..60247c33e 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,8 +1,4 @@
 ---
-- name: restart vrs
-  become: yes
-  systemd: name=openvswitch state=restarted
-
 - name: restart node
   become: yes
   systemd: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 66d6ef4ca..3764681ff 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -1,28 +1,18 @@
 ---
-- name: Install Nuage VRS
-  become: yes
-  yum: name={{ vrs_rpm }} state=present
-
-- name: Set the uplink interface
-  become: yes
-  lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
-
-- name: Set the Active Controller
-  become: yes
-  lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
-
-- name: Set the K8S/OSE Cluster service CIDR
-  become: yes
-  lineinfile: dest={{ vrs_config }} regexp=^K8S_SERVICE_IPV4_SUBNET line='K8S_SERVICE_IPV4_SUBNET={{ k8s_cluster_service_cidr }}'
+- name: Set the Nuage plugin openshift directory fact for Atomic hosts
+  set_fact:
+    vsp_openshift_dir: /var/usr/share/vsp-openshift
+  when: openshift.common.is_atomic | bool
 
-- name: Set the Standby Controller
-  become: yes
-  lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
-  when: vsc_standby_ip is defined
+- name: Set the Nuage CNI binary directory fact for Atomic hosts
+  set_fact:
+    cni_bin_dir: /var/opt/cni/bin/
+  when: openshift.common.is_atomic | bool
 
-- name: Install plugin rpm
-  become: yes
-  yum: name={{ plugin_rpm }} state=present
+- name: Set the Nuage plugin certs directory fact for Atomic hosts
+  set_fact:
+    nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
+  when: openshift.common.is_atomic | bool
 
 - name: Assure CNI conf dir exists
   become: yes
@@ -32,13 +22,6 @@
   become: yes
   file: path="{{ cni_bin_dir }}" state=directory
 
-- name: Install CNI loopback plugin
-  become: yes
-  copy:
-    src: "{{ k8s_cni_loopback_plugin }}"
-    dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}"
-    mode: 0755
-
 - name: Copy the certificates and keys
   become: yes
   copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
@@ -50,12 +33,11 @@
 
 - include: certificates.yml
 
-- name: Set the vsp-openshift.yaml
-  become: yes
-  template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
+- name: Restart node services
+  command: /bin/true
   notify:
-    - restart vrs
     - restart node
+  ignore_errors: true
 
 - include: iptables.yml
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
deleted file mode 100644
index f6bccebc2..000000000
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-clientCert: {{ client_cert }}
-# The key to the certificate in clientCert above
-clientKey: {{ client_key }}
-# The certificate authority's certificate for the local kubelet.  Usually the
-# same as the CA cert used to create the client Cert/Key pair.
-CACert: {{ ca_cert }}
-# Name of the enterprise in which pods will reside
-enterpriseName: {{ enterprise }}
-# Name of the domain in which pods will reside
-domainName: {{ domain }}
-# Name of the VSD user in admin group
-vsdUser: {{ vsd_user }}
-# IP address and port number of master API server
-masterApiServer: {{ api_server }}
-# REST server URL
-nuageMonRestServer: {{ nuage_mon_rest_server_url }}
-# Bridge name for the docker bridge
-dockerBridgeName: {{ docker_bridge }}
-# Certificate for connecting to the kubemon REST API
-nuageMonClientCert: {{ rest_client_cert }}
-# Key to the certificate in restClientCert
-nuageMonClientKey: {{ rest_client_key }}
-# CA certificate for verifying the master's rest server
-nuageMonServerCA: {{ rest_server_ca_cert }}
-# Nuage vport mtu size
-interfaceMTU: {{ vport_mtu  }}
-# Logging level for the plugin
-# allowed options are: "dbg", "info", "warn", "err", "emer", "off"
-logLevel: {{ plugin_log_level }}
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
index 15444c8d0..bcccd4042 100644
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ b/roles/openshift_aws_ami_copy/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - fail:
     msg: "{{ item }} needs to be defined"
-  when: "{{ item }} is not defined"
+  when: item is not defined
   with_items:
   - r_openshift_aws_ami_copy_src_ami
   - r_openshift_aws_ami_copy_name
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
index b541b466c..32aac2666 100644
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ b/roles/openshift_aws_iam_kms/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - fail:
     msg: "{{ item.name }} needs to be defined."
-  when: "{{ item.cond }}"
+  when: item.cond | bool
   with_items:
   - name: "{{ r_openshift_aws_iam_kms_alias }}"
     cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
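Both openshift_aws fixes above correct the same Ansible antipattern: `when:` is already evaluated as a Jinja2 expression, so wrapping the condition in `{{ ... }}` causes double templating and triggers a deprecation warning on recent Ansible releases. A side-by-side sketch of the pattern (task content is illustrative):

    # before: templated conditional (deprecated)
    - fail: msg="missing variable"
      when: "{{ my_var is not defined }}"

    # after: bare expression
    - fail: msg="missing variable"
      when: my_var is not defined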
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index c1de367d9..04a1ce873 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -15,4 +15,4 @@ dependencies:
 - role: openshift_docker
   when: not skip_docker_role | default(False) | bool
 - role: openshift_common
-- role: openshift_cli_facts
+- role: openshift_facts
diff --git a/roles/openshift_cli_facts/meta/main.yml b/roles/openshift_cli_facts/meta/main.yml
deleted file mode 100644
index 59acde215..000000000
--- a/roles/openshift_cli_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: Jason DeTiberus
-  description: OpenShift CLI Facts
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 1.9
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_cli_facts/tasks/main.yml b/roles/openshift_cli_facts/tasks/main.yml
deleted file mode 100644
index dd1ed8965..000000000
--- a/roles/openshift_cli_facts/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# TODO: move this to a new 'cli' role
-- openshift_facts:
-    role: common
-    local_facts:
-      cli_image: "{{ osm_image | default(None) }}"
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
index 3bc6c5813..fd57a864c 100644
--- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
@@ -10,6 +10,12 @@ metadata:
     iconClass: "icon-rails"
 objects:
 - apiVersion: v1
+  kind: Secret
+  metadata:
+    name: "${NAME}-secrets"
+  stringData:
+    pg-password: "${DATABASE_PASSWORD}"
+- apiVersion: v1
   kind: Service
   metadata:
     annotations:
@@ -148,7 +154,10 @@ objects:
               value: "${DATABASE_USER}"
             -
               name: "POSTGRESQL_PASSWORD"
-              value: "${DATABASE_PASSWORD}"
+              valueFrom:
+                secretKeyRef:
+                  name: "${NAME}-secrets"
+                  key: "pg-password"
             -
               name: "POSTGRESQL_DATABASE"
               value: "${DATABASE_NAME}"
@@ -345,7 +354,10 @@ objects:
                 value: "${DATABASE_USER}"
               -
                 name: "POSTGRESQL_PASSWORD"
-                value: "${DATABASE_PASSWORD}"
+                valueFrom:
+                  secretKeyRef:
+                    name: "${NAME}-secrets"
+                    key: "pg-password"
               -
                 name: "POSTGRESQL_DATABASE"
                 value: "${DATABASE_NAME}"
@@ -386,7 +398,8 @@ parameters:
     displayName: "PostgreSQL Password"
     required: true
     description: "Password for the PostgreSQL user."
-    value: "smartvm"
+    from: "[a-zA-Z0-9]{8}"
+    generate: expression
   -
     name: "DATABASE_NAME"
     required: true
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 3bc6c5813..fd57a864c 100644
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
@@ -10,6 +10,12 @@ metadata:
     iconClass: "icon-rails"
 objects:
 - apiVersion: v1
+  kind: Secret
+  metadata:
+    name: "${NAME}-secrets"
+  stringData:
+    pg-password: "${DATABASE_PASSWORD}"
+- apiVersion: v1
   kind: Service
   metadata:
     annotations:
@@ -148,7 +154,10 @@ objects:
               value: "${DATABASE_USER}"
             -
               name: "POSTGRESQL_PASSWORD"
-              value: "${DATABASE_PASSWORD}"
+              valueFrom:
+                secretKeyRef:
+                  name: "${NAME}-secrets"
+                  key: "pg-password"
             -
               name: "POSTGRESQL_DATABASE"
               value: "${DATABASE_NAME}"
@@ -345,7 +354,10 @@ objects:
                 value: "${DATABASE_USER}"
               -
                 name: "POSTGRESQL_PASSWORD"
-                value: "${DATABASE_PASSWORD}"
+                valueFrom:
+                  secretKeyRef:
+                    name: "${NAME}-secrets"
+                    key: "pg-password"
               -
                 name: "POSTGRESQL_DATABASE"
                 value: "${DATABASE_NAME}"
@@ -386,7 +398,8 @@ parameters:
     displayName: "PostgreSQL Password"
     required: true
     description: "Password for the PostgreSQL user."
-    value: "smartvm"
+    from: "[a-zA-Z0-9]{8}"
+    generate: expression
   -
     name: "DATABASE_NAME"
     required: true
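In both copies of the cfme template, the hard-coded default password ("smartvm") is replaced by an OpenShift template parameter generator: with `generate: expression`, the template processor produces a random value matching the `from` regular expression whenever the caller supplies none, and the new Secret carries that value into the pods via secretKeyRef. The mechanism in isolation (field values copied from the diff, the comment is illustrative):

    parameters:
      - name: "DATABASE_PASSWORD"
        displayName: "PostgreSQL Password"
        required: true
        # produces e.g. an 8-character alphanumeric string at template-processing time
        from: "[a-zA-Z0-9]{8}"
        generate: expression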
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 05e53333d..8d35db6b5 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -1,76 +1,74 @@
 """
 Ansible action plugin to execute health checks in OpenShift clusters.
 """
-# pylint: disable=wrong-import-position,missing-docstring,invalid-name
 import sys
 import os
+import traceback
 from collections import defaultdict
 
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+
 try:
     from __main__ import display
 except ImportError:
+    # pylint: disable=ungrouped-imports; this is the standard way how to import
+    # the default display object in Ansible action plugins.
    from ansible.utils.display import Display
     display = Display()
 
-from ansible.plugins.action import ActionBase
-from ansible.module_utils.six import string_types
-
 # Augment sys.path so that we can import checks from a directory relative to
 # this callback plugin.
 sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
 
+# pylint: disable=wrong-import-position; the import statement must come after
+# the manipulation of sys.path.
 from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks  # noqa: E402
 
 
 class ActionModule(ActionBase):
+    """Action plugin to execute health checks."""
 
     def run(self, tmp=None, task_vars=None):
         result = super(ActionModule, self).run(tmp, task_vars)
         task_vars = task_vars or {}
 
-        # vars are not supportably available in the callback plugin,
-        # so record any it will need in the result.
+        # callback plugins cannot read Ansible vars, but we would like
+        # zz_failure_summary to have access to certain values. We do so by
+        # storing the information we need in the result.
         result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
 
-        if "openshift" not in task_vars:
-            result["failed"] = True
-            result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
-            return result
-
         try:
             known_checks = self.load_known_checks(tmp, task_vars)
             args = self._task.args
             requested_checks = normalize(args.get('checks', []))
+
+            if not requested_checks:
+                result['failed'] = True
+                result['msg'] = list_known_checks(known_checks)
+                return result
+
             resolved_checks = resolve_checks(requested_checks, known_checks.values())
-        except OpenShiftCheckException as e:
+        except OpenShiftCheckException as exc:
             result["failed"] = True
-            result["msg"] = str(e)
+            result["msg"] = str(exc)
+            return result
+
+        if "openshift" not in task_vars:
+            result["failed"] = True
+            result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
             return result
 
         result["checks"] = check_results = {}
 
         user_disabled_checks = normalize(task_vars.get('openshift_disable_check', []))
 
-        for check_name in resolved_checks:
-            display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
-            check = known_checks[check_name]
-
-            if not check.is_active():
-                r = dict(skipped=True, skipped_reason="Not active for this host")
-            elif check_name in user_disabled_checks:
-                r = dict(skipped=True, skipped_reason="Disabled by user request")
-            else:
-                try:
-                    r = check.run()
-                except OpenShiftCheckException as e:
-                    r = dict(
-                        failed=True,
-                        msg=str(e),
-                    )
-
+        for name in resolved_checks:
+            display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
+            check = known_checks[name]
+            check_results[name] = run_check(name, check, user_disabled_checks)
             if check.changed:
-                r["changed"] = True
-            check_results[check_name] = r
+                check_results[name]["changed"] = True
 
         result["changed"] = any(r.get("changed") for r in check_results.values())
         if any(r.get("failed") for r in check_results.values()):
@@ -80,22 +78,55 @@ class ActionModule(ActionBase):
         return result
 
     def load_known_checks(self, tmp, task_vars):
+        """Find all existing checks and return a mapping of names to instances."""
         load_checks()
 
         known_checks = {}
         for cls in OpenShiftCheck.subclasses():
-            check_name = cls.name
-            if check_name in known_checks:
-                other_cls = known_checks[check_name].__class__
+            name = cls.name
+            if name in known_checks:
+                other_cls = known_checks[name].__class__
                 raise OpenShiftCheckException(
-                    "non-unique check name '{}' in: '{}.{}' and '{}.{}'".format(
-                        check_name,
-                        cls.__module__, cls.__name__,
-                        other_cls.__module__, other_cls.__name__))
-            known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+                    "duplicate check name '{}' in: '{}' and '{}'"
+                    "".format(name, full_class_name(cls), full_class_name(other_cls))
+                )
+            known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
         return known_checks
 
 
+def list_known_checks(known_checks):
+    """Return text listing the existing checks and tags."""
+    # TODO: we could include a description of each check by taking it from a
+    # check class attribute (e.g., __doc__) when building the message below.
+    msg = (
+        'This playbook is meant to run health checks, but no checks were '
+        'requested. Set the `openshift_checks` variable to a comma-separated '
+        'list of check names or a YAML list. Available checks:\n  {}'
+    ).format('\n  '.join(sorted(known_checks)))
+
+    tags = describe_tags(known_checks.values())
+
+    msg += (
+        '\n\nTags can be used as a shortcut to select multiple '
+        'checks. Available tags and the checks they select:\n  {}'
+    ).format('\n  '.join(tags))
+
+    return msg
+
+
+def describe_tags(check_classes):
+    """Return a sorted list of strings describing tags and the checks they include."""
+    tag_checks = defaultdict(list)
+    for cls in check_classes:
+        for tag in cls.tags:
+            tag_checks[tag].append(cls.name)
+    tags = [
+        '@{} = {}'.format(tag, ','.join(sorted(checks)))
+        for tag, checks in tag_checks.items()
+    ]
+    return sorted(tags)
+
+
 def resolve_checks(names, all_checks):
     """Returns a set of resolved check names.
@@ -123,6 +154,12 @@ def resolve_checks(names, all_checks):
         if unknown_tag_names:
             msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
         msg.append('Make sure there is no typo in the playbook and no files are missing.')
+        # TODO: implement a "Did you mean ...?" when the input is similar to a
+        # valid check or tag.
+        msg.append('Known checks:')
+        msg.append('  {}'.format('\n  '.join(sorted(known_check_names))))
+        msg.append('Known tags:')
+        msg.append('  {}'.format('\n  '.join(describe_tags(all_checks))))
         raise OpenShiftCheckException('\n'.join(msg))
 
     tag_to_checks = defaultdict(set)
@@ -146,3 +183,32 @@ def normalize(checks):
     if isinstance(checks, string_types):
         checks = checks.split(',')
     return [name.strip() for name in checks if name.strip()]
+
+
+def run_check(name, check, user_disabled_checks):
+    """Run a single check if enabled and return a result dict."""
+    if name in user_disabled_checks:
+        return dict(skipped=True, skipped_reason="Disabled by user request")
+
+    # pylint: disable=broad-except; capturing exceptions broadly is intentional,
+    # to isolate arbitrary failures in one check from others.
+    try:
+        is_active = check.is_active()
+    except Exception as exc:
+        reason = "Could not determine if check should be run, exception: {}".format(exc)
+        return dict(skipped=True, skipped_reason=reason, exception=traceback.format_exc())
+
+    if not is_active:
+        return dict(skipped=True, skipped_reason="Not active for this host")
+
+    try:
+        return check.run()
+    except OpenShiftCheckException as exc:
+        return dict(failed=True, msg=str(exc))
+    except Exception as exc:
+        return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+
+
+def full_class_name(cls):
+    """Return the name of a class prefixed with its module name."""
+    return '{}.{}'.format(cls.__module__, cls.__name__)
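For context, this action plugin is invoked from a playbook task: check names and @tags arrive through its checks argument, and opt-outs come from the openshift_disable_check variable it consults above. A hedged sketch of a calling play (host group and check selection are illustrative):

    - hosts: oo_first_master
      roles:
        - openshift_health_checker
      tasks:
        - name: Run OpenShift health checks
          action: openshift_health_check
          args:
            checks: ['@preflight', 'memory_availability']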
+The file / module name is prefixed with `zz_` to make this plugin be loaded last +by Ansible, thus making its output the last thing that users see. +""" -from pprint import pformat +from collections import defaultdict +import traceback  from ansible.plugins.callback import CallbackBase  from ansible import constants as C  from ansible.utils.color import stringc +FAILED_NO_MSG = u'Failed without returning a message.' + +  class CallbackModule(CallbackBase): -    """ -    This callback plugin stores task results and summarizes failures. -    The file name is prefixed with `zz_` to make this plugin be loaded last by -    Ansible, thus making its output the last thing that users see. -    """ +    """This callback plugin stores task results and summarizes failures."""      CALLBACK_VERSION = 2.0      CALLBACK_TYPE = 'aggregate'      CALLBACK_NAME = 'failure_summary'      CALLBACK_NEEDS_WHITELIST = False -    _playbook_file = None      def __init__(self):          super(CallbackModule, self).__init__()          self.__failures = [] +        self.__playbook_file = ''      def v2_playbook_on_start(self, playbook):          super(CallbackModule, self).v2_playbook_on_start(playbook) -        # re: playbook attrs see top comment  # pylint: disable=protected-access -        self._playbook_file = playbook._file_name +        # pylint: disable=protected-access; Ansible gives us no public API to +        # get the file name of the current playbook from a callback plugin. +        self.__playbook_file = playbook._file_name      def v2_runner_on_failed(self, result, ignore_errors=False):          super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)          if not ignore_errors: -            self.__failures.append(dict(result=result, ignore_errors=ignore_errors)) +            self.__failures.append(result)      def v2_playbook_on_stats(self, stats):          super(CallbackModule, self).v2_playbook_on_stats(stats) -        if self.__failures: -            self._print_failure_details(self.__failures) - -    def _print_failure_details(self, failures): -        """Print a summary of failed tasks or checks.""" -        self._display.display(u'\nFailure summary:\n') - -        width = len(str(len(failures))) -        initial_indent_format = u'  {{:>{width}}}. '.format(width=width) -        initial_indent_len = len(initial_indent_format.format(0)) -        subsequent_indent = u' ' * initial_indent_len -        subsequent_extra_indent = u' ' * (initial_indent_len + 10) - -        for i, failure in enumerate(failures, 1): -            entries = _format_failure(failure) -            self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0])) -            for entry in entries[1:]: -                entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent) -                indented = u'{}{}'.format(subsequent_indent, entry) -                self._display.display(indented) - -        failed_checks = set() -        playbook_context = None -        # re: result attrs see top comment  # pylint: disable=protected-access -        for failure in failures: -            # Get context from check task result since callback plugins cannot access task vars. -            # NOTE: thus context is not known unless checks run. Failures prior to checks running -            # don't have playbook_context in the results. But we only use it now when checks fail. 
-            playbook_context = playbook_context or failure['result']._result.get('playbook_context') -            failed_checks.update( -                name -                for name, result in failure['result']._result.get('checks', {}).items() -                if result.get('failed') -            ) -        if failed_checks: -            self._print_check_failure_summary(failed_checks, playbook_context) - -    def _print_check_failure_summary(self, failed_checks, context): -        checks = ','.join(sorted(failed_checks)) -        # The purpose of specifying context is to vary the output depending on what the user was -        # expecting to happen (based on which playbook they ran). The only use currently is to -        # vary the message depending on whether the user was deliberately running checks or was -        # trying to install/upgrade and checks are just included. Other use cases may arise. -        summary = (  # default to explaining what checks are in the first place -            '\n' -            'The execution of "{playbook}"\n' -            'includes checks designed to fail early if the requirements\n' -            'of the playbook are not met. One or more of these checks\n' -            'failed. To disregard these results, you may choose to\n' -            'disable failing checks by setting an Ansible variable:\n\n' -            '   openshift_disable_check={checks}\n\n' -            'Failing check names are shown in the failure details above.\n' -            'Some checks may be configurable by variables if your requirements\n' -            'are different from the defaults; consult check documentation.\n' -            'Variables can be set in the inventory or passed on the\n' -            'command line using the -e flag to ansible-playbook.\n\n' -        ).format(playbook=self._playbook_file, checks=checks) -        if context in ['pre-install', 'health']: -            summary = (  # user was expecting to run checks, less explanation needed -                '\n' -                'You may choose to configure or disable failing checks by\n' -                'setting Ansible variables. To disable those above:\n\n' -                '    openshift_disable_check={checks}\n\n' -                'Consult check documentation for configurable variables.\n' -                'Variables can be set in the inventory or passed on the\n' -                'command line using the -e flag to ansible-playbook.\n\n' -            ).format(checks=checks) -        self._display.display(summary) - - -# re: result attrs see top comment  # pylint: disable=protected-access -def _format_failure(failure): +        # pylint: disable=broad-except; capturing exceptions broadly is +        # intentional, to isolate arbitrary failures in this callback plugin. 
+        try: +            if self.__failures: +                self._display.display(failure_summary(self.__failures, self.__playbook_file)) +        except Exception: +            msg = stringc( +                u'An error happened while generating a summary of failures:\n' +                u'{}'.format(traceback.format_exc()), C.COLOR_WARN) +            self._display.v(msg) + + +def failure_summary(failures, playbook): +    """Return a summary of failed tasks, including details on health checks.""" +    if not failures: +        return u'' + +    # NOTE: because we don't have access to task_vars from callback plugins, we +    # store the playbook context in the task result when the +    # openshift_health_check action plugin is used, and we use this context to +    # customize the error message. +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API on TaskResult objects. +    context = next(( +        context for context in +        (failure._result.get('playbook_context') for failure in failures) +        if context +    ), None) + +    failures = [failure_to_dict(failure) for failure in failures] +    failures = deduplicate_failures(failures) + +    summary = [u'', u'', u'Failure summary:', u''] + +    width = len(str(len(failures))) +    initial_indent_format = u'  {{:>{width}}}. '.format(width=width) +    initial_indent_len = len(initial_indent_format.format(0)) +    subsequent_indent = u' ' * initial_indent_len +    subsequent_extra_indent = u' ' * (initial_indent_len + 10) + +    for i, failure in enumerate(failures, 1): +        entries = format_failure(failure) +        summary.append(u'\n{}{}'.format(initial_indent_format.format(i), entries[0])) +        for entry in entries[1:]: +            entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent) +            indented = u'{}{}'.format(subsequent_indent, entry) +            summary.append(indented) + +    failed_checks = set() +    for failure in failures: +        failed_checks.update(name for name, message in failure['checks']) +    if failed_checks: +        summary.append(check_failure_footer(failed_checks, context, playbook)) + +    return u'\n'.join(summary) + + +def failure_to_dict(failed_task_result): +    """Extract information out of a failed TaskResult into a dict. + +    The intent is to transform a TaskResult object into something easier to +    manipulate. TaskResult is ansible.executor.task_result.TaskResult. +    """ +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API on TaskResult objects. +    _result = failed_task_result._result +    return { +        'host': failed_task_result._host.get_name(), +        'play': play_name(failed_task_result._task), +        'task': failed_task_result.task_name, +        'msg': _result.get('msg', FAILED_NO_MSG), +        'checks': tuple( +            (name, result.get('msg', FAILED_NO_MSG)) +            for name, result in sorted(_result.get('checks', {}).items()) +            if result.get('failed') +        ), +    } + + +def play_name(obj): +    """Given a task or block, return the name of its parent play. + +    This is loosely inspired by ansible.playbook.base.Base.dump_me. +    """ +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API to implement this. 
+    if not obj: +        return '' +    if hasattr(obj, '_play'): +        return obj._play.get_name() +    return play_name(getattr(obj, '_parent')) + + +def deduplicate_failures(failures): +    """Group together similar failures from different hosts. + +    Returns a new list of failures such that identical failures from different +    hosts are grouped together in a single entry. The relative order of failures +    is preserved. +    """ +    groups = defaultdict(list) +    for failure in failures: +        group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) +        groups[group_key].append(failure) +    result = [] +    for failure in failures: +        group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) +        if group_key not in groups: +            continue +        failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key))) +        result.append(failure) +    return result + + +def format_failure(failure):      """Return a list of pretty-formatted text entries describing a failure, including      relevant information about it. Expect that the list of text entries will be joined      by a newline separator when output to the user.""" -    result = failure['result'] -    host = result._host.get_name() -    play = _get_play(result._task) -    if play: -        play = play.get_name() -    task = result._task.get_name() -    msg = result._result.get('msg', u'???') +    host = u', '.join(failure['host']) +    play = failure['play'] +    task = failure['task'] +    msg = failure['msg'] +    checks = failure['checks']      fields = ( -        (u'Host', host), +        (u'Hosts', host),          (u'Play', play),          (u'Task', task),          (u'Message', stringc(msg, C.COLOR_ERROR)),      ) -    if 'checks' in result._result: -        fields += ((u'Details', _format_failed_checks(result._result['checks'])),) +    if checks: +        fields += ((u'Details', format_failed_checks(checks)),)      row_format = '{:10}{}'      return [row_format.format(header + u':', body) for header, body in fields] -def _format_failed_checks(checks): +def format_failed_checks(checks):      """Return pretty-formatted text describing checks that failed.""" -    failed_check_msgs = [] -    for check, body in checks.items(): -        if body.get('failed', False):   # only show the failed checks -            msg = body.get('msg', u"Failed without returning a message") -            failed_check_msgs.append('check "%s":\n%s' % (check, msg)) -    if failed_check_msgs: -        return stringc("\n\n".join(failed_check_msgs), C.COLOR_ERROR) -    else:    # something failed but no checks will admit to it, so dump everything -        return stringc(pformat(checks), C.COLOR_ERROR) - - -# This is inspired by ansible.playbook.base.Base.dump_me. -# re: play/task/block attrs see top comment  # pylint: disable=protected-access -def _get_play(obj): -    """Given a task or block, recursively try to find its parent play.""" -    if hasattr(obj, '_play'): -        return obj._play -    if getattr(obj, '_parent'): -        return _get_play(obj._parent) +    messages = [] +    for name, message in checks: +        messages.append(u'check "{}":\n{}'.format(name, message)) +    return stringc(u'\n\n'.join(messages), C.COLOR_ERROR) + + +def check_failure_footer(failed_checks, context, playbook): +    """Return a textual explanation about checks depending on context. 
+
+    The purpose of specifying context is to vary the output depending on what
+    the user was expecting to happen (based on which playbook they ran). The
+    only use currently is to vary the message depending on whether the user was
+    deliberately running checks or was trying to install/upgrade with checks
+    included implicitly. Other use cases may arise.
+    """
+    checks = ','.join(sorted(failed_checks))
+    summary = [u'']
+    if context in ['pre-install', 'health', 'adhoc']:
+        # User was expecting to run checks, so less explanation is needed.
+        summary.extend([
+            u'You may configure or disable checks by setting Ansible '
+            u'variables. To disable those above, set:',
+            u'    openshift_disable_check={checks}'.format(checks=checks),
+            u'Consult check documentation for configurable variables.',
+        ])
+    else:
+        # User may not be familiar with the checks; explain what checks are in
+        # the first place.
+        summary.extend([
+            u'The execution of "{playbook}" includes checks designed to fail '
+            u'early if the requirements of the playbook are not met. One or '
+            u'more of these checks failed. To disregard these results, '
+            u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),
+            u'    openshift_disable_check={checks}'.format(checks=checks),
+            u'Failing check names are shown in the failure details above. '
+            u'Some checks may be configurable by variables if your requirements '
+            u'are different from the defaults; consult check documentation.',
+        ])
+    summary.append(
+        u'Variables can be set in the inventory or passed on the command line '
+        u'using the -e flag to ansible-playbook.'
+    ) +    return u'\n'.join(summary) diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py index f5161d6f5..c109ebd24 100644 --- a/roles/openshift_health_checker/test/action_plugin_test.py +++ b/roles/openshift_health_checker/test/action_plugin_test.py @@ -80,7 +80,8 @@ def skipped(result):      None,      {},  ]) -def test_action_plugin_missing_openshift_facts(plugin, task_vars): +def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch): +    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])      result = plugin.run(tmp=None, task_vars=task_vars)      assert failed(result, msg_has=['openshift_facts']) @@ -94,7 +95,7 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,      result = plugin.run(tmp=None, task_vars=task_vars) -    assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck']) +    assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])  def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch): @@ -217,24 +218,21 @@ def test_resolve_checks_ok(names, all_checks, expected):      assert resolve_checks(names, all_checks) == expected -@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [ +@pytest.mark.parametrize('names,all_checks,words_in_exception', [      (          ['testA', 'testB'],          [],          ['check', 'name', 'testA', 'testB'], -        ['tag', 'group', '@'],      ),      (          ['@group'],          [],          ['tag', 'name', 'group'], -        ['check', '@'],      ),      (          ['testA', 'testB', '@group'],          [],          ['check', 'name', 'testA', 'testB', 'tag', 'group'], -        ['@'],      ),      (          ['testA', 'testB', '@group'], @@ -244,13 +242,10 @@ def test_resolve_checks_ok(names, all_checks, expected):              fake_check('from_group_2', ['preflight', 'group']),          ],          ['check', 'name', 'testA', 'testB'], -        ['tag', 'group', '@'],      ),  ]) -def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception): +def test_resolve_checks_failure(names, all_checks, words_in_exception):      with pytest.raises(Exception) as excinfo:          resolve_checks(names, all_checks)      for word in words_in_exception:          assert word in str(excinfo.value) -    for word in words_not_in_exception: -        assert word not in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py index 3cbd65507..244a1f0fa 100644 --- a/roles/openshift_health_checker/test/conftest.py +++ b/roles/openshift_health_checker/test/conftest.py @@ -7,5 +7,6 @@ openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))  sys.path[1:1] = [      openshift_health_checker_path,      os.path.join(openshift_health_checker_path, 'action_plugins'), +    os.path.join(openshift_health_checker_path, 'callback_plugins'),      os.path.join(openshift_health_checker_path, 'library'),  ] diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py new file mode 100644 index 000000000..0fc258133 --- /dev/null +++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py @@ -0,0 +1,70 @@ +from zz_failure_summary import deduplicate_failures + +import pytest + + 
+@pytest.mark.parametrize('failures,deduplicated', [ +    ( +        [ +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +            }, +        ], +        [ +            { +                'host': ('master1',), +                'msg': 'One or more checks failed', +            }, +        ], +    ), +    ( +        [ +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +            }, +            { +                'host': 'node1', +                'msg': 'One or more checks failed', +            }, +        ], +        [ +            { +                'host': ('master1', 'node1'), +                'msg': 'One or more checks failed', +            }, +        ], +    ), +    ( +        [ +            { +                'host': 'node1', +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +            { +                'host': 'master2', +                'msg': 'Some error happened', +            }, +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +        ], +        [ +            { +                'host': ('master1', 'node1'), +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +            { +                'host': ('master2',), +                'msg': 'Some error happened', +            }, +        ], +    ), +]) +def test_deduplicate_failures(failures, deduplicated): +    assert deduplicate_failures(failures) == deduplicated diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index f0e303e43..f3747eead 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -1,9 +1,9 @@  --- -r_openshift_hosted_router_firewall_enabled: True -r_openshift_hosted_router_use_firewalld: False +r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -r_openshift_hosted_registry_firewall_enabled: True -r_openshift_hosted_registry_use_firewalld: False +r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  openshift_hosted_router_wait: True  openshift_hosted_registry_wait: True diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml index a18e6eea9..a8a6f6fc8 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/registry/secure.yml @@ -37,6 +37,9 @@      hostnames:      - "{{ docker_registry_service.results.clusterip }}"      - "{{ docker_registry_route.results[0].spec.host }}" +    - "{{ openshift_hosted_registry_name }}.default.svc" +    - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}" +    - "{{ openshift_hosted_registry_routehost }}"      cert: "{{ docker_registry_cert_path }}"      key: "{{ docker_registry_key_path }}"      expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}" diff --git 
a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 3f6409233..41a2b12a2 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-r_openshift_loadbalancer_firewall_enabled: True
-r_openshift_loadbalancer_use_firewalld: False
+r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 haproxy_frontends:
 - name: main
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 84ead3548..70aef02cd 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -22,7 +22,19 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
 
 ###Optional vars:
 - `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
+- `openshift_logging_curator_image_prefix`: The image prefix for the Curator image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_elasticsearch_image_prefix`: The image prefix for the Elasticsearch image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_fluentd_image_prefix`: The image prefix for the Fluentd image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_kibana_image_prefix`: The image prefix for the Kibana image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_kibana_proxy_image_prefix`: The image prefix for the Kibana proxy image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_mux_image_prefix`: The image prefix for the Mux image. Defaults to `openshift_logging_image_prefix`.
 - `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
+- `openshift_logging_curator_image_version`: The image version for the Curator image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_elasticsearch_image_version`: The image version for the Elasticsearch image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_fluentd_image_version`: The image version for the Fluentd image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_kibana_image_version`: The image version for the Kibana image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_kibana_proxy_image_version`: The image version for the Kibana proxy image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_mux_image_version`: The image version for the Mux image. Defaults to `openshift_logging_image_version`.
 - `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
 - `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
 - `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift.master.api_port}}'.
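
The per-component image variables documented above all resolve through the same fallback chain: each component-specific prefix/version defaults to the global `openshift_logging_image_prefix`/`openshift_logging_image_version`, which in turn falls back to the role's internal default. A minimal standalone sketch of that chain, using plain Jinja2 rather than Ansible's full variable precedence (the registry values are illustrative):

    from jinja2 import Template

    # Stand-in for the role's internal default, __openshift_logging_image_prefix
    # (illustrative value).
    role_defaults = {'__openshift_logging_image_prefix': 'docker.io/openshift/origin-'}

    # The expression each component role uses for its image prefix.
    expr = Template(
        "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
    )

    # With no global override, the component prefix falls back to the role default.
    print(expr.render(**role_defaults))
    # -> docker.io/openshift/origin-

    # Setting openshift_logging_image_prefix once overrides every component
    # prefix that was not set explicitly.
    print(expr.render(openshift_logging_image_prefix='registry.example.com/openshift3/ose-',
                      **role_defaults))
    # -> registry.example.com/openshift3/ose-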
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 8b0f4cb62..f07d7e6da 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -84,7 +84,7 @@ openshift_logging_es_ca: /etc/fluent/keys/ca  openshift_logging_es_client_cert: /etc/fluent/keys/cert  openshift_logging_es_client_key: /etc/fluent/keys/key  openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" -openshift_logging_es_cpu_limit: null +openshift_logging_es_cpu_limit: 1000m  # the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'  openshift_logging_es_log_appenders: ['file']  openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" @@ -125,7 +125,7 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca  openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert  openshift_logging_es_ops_client_key: /etc/fluent/keys/key  openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" -openshift_logging_es_ops_cpu_limit: null +openshift_logging_es_ops_cpu_limit: 1000m  openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"  openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"  openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index d4a305fb8..f8553be79 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -185,8 +185,6 @@      openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" -    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"      openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"      openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}" @@ -201,8 +199,6 @@      openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" -    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"      openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" @@ -230,8 +226,6 @@      openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"      openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"      openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" -    
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"  - include_role: @@ -243,8 +237,6 @@      openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"      openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"      openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" @@ -260,8 +252,6 @@      openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"      openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"    when:    - openshift_logging_use_mux | bool @@ -273,8 +263,6 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}" -    openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml index 82ffb2f93..17807b644 100644 --- a/roles/openshift_logging_curator/defaults/main.yml +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local" diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 3113fb3c9..6e8fab2b5 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -86,7 +86,7 @@      component: "{{ curator_component }}"      logging_component: curator      deploy_name: "{{ curator_name }}" -    image: 
"{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" +    image: "{{openshift_logging_curator_image_prefix}}logging-curator:{{openshift_logging_curator_image_version}}"      es_host: "{{ openshift_logging_curator_es_host }}"      es_port: "{{ openshift_logging_curator_es_port }}"      curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 0690bf114..75bd479be 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### Common settings -openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_elasticsearch_namespace: logging diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 28c3ffd96..931846fdb 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -277,7 +277,7 @@      component: "{{ es_component }}"      logging_component: elasticsearch      deploy_name: "{{ es_deploy_name }}" -    image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"      es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"      es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"      es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index a53bbd2df..30d3d854a 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"  openshift_logging_fluentd_namespace: logging diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 39dffba19..88e039e3f 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ 
b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -28,7 +28,7 @@ spec:          {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"        containers:        - name: "{{ daemonset_container_name }}" -        image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}" +        image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"          imagePullPolicy: Always          securityContext:            privileged: true diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 14787a62b..ee265bb14 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -2,8 +2,8 @@  ### Common settings  openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"  openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local" -openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_kibana_namespace: logging @@ -24,6 +24,8 @@ openshift_logging_kibana_edge_term_policy: Redirect  openshift_logging_kibana_ops_deployment: false  # Proxy settings +openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_kibana_proxy_debug: false  openshift_logging_kibana_proxy_cpu_limit: null  openshift_logging_kibana_proxy_memory_limit: 256Mi diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 166f102f7..e17e8c1f2 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -225,8 +225,8 @@      component: "{{ kibana_component }}"      logging_component: kibana      deploy_name: "{{ kibana_name }}" -    image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}" -    proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_kibana_image_prefix }}logging-kibana:{{ openshift_logging_kibana_image_version }}" +    proxy_image: "{{ openshift_logging_kibana_proxy_image_prefix }}logging-auth-proxy:{{ openshift_logging_kibana_proxy_image_version }}"      es_host: "{{ openshift_logging_kibana_es_host }}"      es_port: "{{ openshift_logging_kibana_es_port }}"      kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 7a3da9b4c..68412aec8 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_mux_image_prefix: "{{ 
openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"  openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 8ec93de7d..2ec863afa 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -165,7 +165,7 @@      component: mux      logging_component: mux      deploy_name: "logging-{{ component }}" -    image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_mux_image_prefix }}logging-fluentd:{{ openshift_logging_mux_image_version }}"      es_host: "{{ openshift_logging_mux_app_host }}"      es_port: "{{ openshift_logging_mux_app_port }}"      ops_host: "{{ openshift_logging_mux_ops_host }}" diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index a4c178908..cbc879d31 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_master_firewall_enabled: True -r_openshift_master_use_firewalld: False +r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  openshift_node_ips: []  r_openshift_master_clean_install: false diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 973b3a619..c7867d225 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_node_firewall_enabled: True -r_openshift_node_use_firewalld: False +r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  r_openshift_node_os_firewall_deny: []  r_openshift_node_os_firewall_allow:  - service: Kubernetes kubelet diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index fa0d4e323..da16e7592 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -70,25 +70,15 @@      - openshift_disable_swap | default(true) | bool  # End Disable Swap Block -# We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.  
- name: Install Node package
   package:
-    name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+    name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
   when: not openshift.common.is_containerized | bool
 
-- name: Check for tuned package
-  command: rpm -q tuned
-  args:
-    warn: no
-  register: tuned_installed
-  changed_when: false
-  failed_when: false
-
-- name: Set atomic-guest tuned profile
-  command: "tuned-adm profile atomic-guest"
-  when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
+- name: Setup tuned
+  include: tuned.yml
+  static: yes
 
 - name: Install sdn-ovs package
   package:
diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/openshift_node/tasks/tuned.yml
new file mode 100644
index 000000000..425bf6a26
--- /dev/null
+++ b/roles/openshift_node/tasks/tuned.yml
@@ -0,0 +1,41 @@
+---
+- name: Check for tuned package
+  command: rpm -q tuned
+  args:
+    warn: no
+  register: tuned_installed
+  changed_when: false
+  failed_when: false
+
+- name: Tuned service setup
+  block:
+  - name: Set tuned OpenShift variables
+    set_fact:
+      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
+      tuned_etc_directory: '/etc/tuned'
+      tuned_templates_source: '../templates/tuned'
+
+  - name: Ensure directory structure exists
+    file:
+      state: directory
+      dest: '{{ tuned_etc_directory }}/{{ item.path }}'
+    with_filetree: '{{ tuned_templates_source }}'
+    when: item.state == 'directory'
+
+  - name: Ensure files are populated from templates
+    template:
+      src: '{{ item.src }}'
+      dest: '{{ tuned_etc_directory }}/{{ item.path }}'
+    with_filetree: '{{ tuned_templates_source }}'
+    when: item.state == 'file'
+
+  - name: Make tuned use the recommended tuned profile on restart
+    file: path=/etc/tuned/active_profile state=absent
+
+  - name: Restart tuned service
+    systemd:
+      state: restarted
+      daemon_reload: yes
+      name: tuned
+
+  when: tuned_installed.rc == 0
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index a5887465e..8d21a3f27 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -52,8 +52,8 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus
 openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
 openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
 openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}"
-r_openshift_master_firewall_enabled: True
-r_openshift_master_use_firewalld: False
+r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 r_openshift_storage_glusterfs_os_firewall_deny: []
 r_openshift_storage_glusterfs_os_firewall_allow:
 - service: glusterfs_sshd
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index 4a2bc6141..e7e0b331b 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
-r_openshift_storage_nfs_firewall_enabled: True
-r_openshift_storage_nfs_use_firewalld: False
+r_openshift_storage_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_storage_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 r_openshift_storage_nfs_os_firewall_deny: []
 r_openshift_storage_nfs_os_firewall_allow:
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index a6b8a40c8..c0ea00f34 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -60,13 +60,16 @@
   # It also allows for optional trailing data which:
   # - must start with a dash
   # - may contain numbers
+  # - may contain dots (https://github.com/openshift/openshift-ansible/issues/5192)
+  #
   - name: (Enterprise) Verify openshift_image_tag is valid
     when: openshift.common.deployment_type == 'openshift-enterprise'
     assert:
       that:
-      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
+      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
       msg: |-
-        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
+        v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
         You specified openshift_image_tag={{ openshift_image_tag }}
 
 # Make sure we copy this to a fact if given a var:
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index f96a80f1c..2cae94411 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -2,4 +2,4 @@
 os_firewall_enabled: True
 # firewalld is not supported on Atomic Host
 # https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-os_firewall_use_firewalld: "{{ False }}"
+os_firewall_use_firewalld: False
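
Ansible's `match` filter behaves like an anchored `re.match`, so the loosened `openshift_image_tag` pattern in the openshift_version change above can be sanity-checked standalone. A quick sketch in plain Python (tags beyond the examples in the error message are illustrative):

    import re

    # Same pattern as the (Enterprise) assert above, minus the Jinja escaping.
    TAG_RE = re.compile(r'^v\d+\.\d+(\.\d+)*(-\d+(\.\d+)*)?$')

    accepted = ['v1.2', 'v3.4.1', 'v3.5.1.3', 'v3.5.1.3.4',
                'v1.2-1', 'v1.2.3-4', 'v1.2.3-4.5', 'v1.2.3-4.5.6']
    for tag in accepted:
        assert TAG_RE.match(tag), tag

    # Dots in the trailing data (issue 5192) now pass; malformed tags still fail.
    assert TAG_RE.match('v3.6.173.0.5-3.10')  # illustrative build-style tag
    assert not TAG_RE.match('3.6')            # missing the leading "v"
    assert not TAG_RE.match('v1.2-')          # dash without a build number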
