diff options
Diffstat (limited to 'roles')
35 files changed, 457 insertions, 122 deletions
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml index 7ba5bd485..937d94209 100644 --- a/roles/haproxy/defaults/main.yml +++ b/roles/haproxy/defaults/main.yml @@ -1,4 +1,6 @@  --- +haproxy_frontend_port: 80 +  haproxy_frontends:  - name: main    binds: @@ -18,4 +20,4 @@ os_firewall_allow:  - service: haproxy stats    port: "9000/tcp"  - service: haproxy balance -  port: "8443/tcp" +  port: "{{ haproxy_frontend_port }}/tcp" diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md new file mode 100644 index 000000000..de101dd19 --- /dev/null +++ b/roles/nuage_master/README.md @@ -0,0 +1,8 @@ +Nuage Master +============ +Setup Nuage Kubernetes Monitor on the Master node + + +Requirements +------------ +This role assumes it has been deployed on RHEL/Fedora diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh new file mode 100644 index 000000000..f6fdb8a8d --- /dev/null +++ b/roles/nuage_master/files/serviceaccount.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Parse CLI options +for i in "$@"; do +    case $i in +        --master-cert-dir=*) +            MASTER_DIR="${i#*=}" +            CA_CERT=${MASTER_DIR}/ca.crt +            CA_KEY=${MASTER_DIR}/ca.key +            CA_SERIAL=${MASTER_DIR}/ca.serial.txt +            ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig +        ;; +        --server=*) +            SERVER="${i#*=}" +        ;; +        --output-cert-dir=*) +            OUTDIR="${i#*=}" +            CONFIG_FILE=${OUTDIR}/nuage.kubeconfig +        ;; +    esac +done + +# If any are missing, print the usage and exit +if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then +    echo "Invalid syntax: $@" +    echo "Usage:" +    echo "  $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/" +    echo "--master-cert-dir:  Directory where the master's configuration is held" +    echo "--server:           Address of Kubernetes API 
server (default port is 8443)" +    echo "--output-cert-dir:  Directory to put artifacts in" +    echo "" +    echo "All options are required" +    exit 1 +fi + +# Login as admin so that we can create the service account +oc login -u system:admin --config=$ADMIN_FILE || exit 1 +oc project default --config=$ADMIN_FILE + +ACCOUNT_CONFIG=' +{ +  "apiVersion": "v1", +  "kind": "ServiceAccount", +  "metadata": { +    "name": "nuage" +  } +} +' + +# Create the account with the included info +echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f - + +# Add the cluster-reader role, which allows this service account read access to +# everything in the cluster except secrets +oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE + +# Generate certificates and a kubeconfig for the service account +oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage' + +# Verify the finalized kubeconfig +if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then +    echo "Service account creation failed!" 
+    exit 1 +fi diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml new file mode 100644 index 000000000..635d8a419 --- /dev/null +++ b/roles/nuage_master/handlers/main.yaml @@ -0,0 +1,18 @@ +--- +- name: restart nuagekubemon +  sudo: true +  service: name=nuagekubemon state=restarted + +- name: restart master +  service: name={{ openshift.common.service_type }}-master state=restarted +  when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false)) + +- name: restart master api +  service: name={{ openshift.common.service_type }}-master-api state=restarted +  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' + +# TODO: need to fix up ignore_errors here +- name: restart master controllers +  service: name={{ openshift.common.service_type }}-master-controllers state=restarted +  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' +  ignore_errors: yes diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml new file mode 100644 index 000000000..a7baadc76 --- /dev/null +++ b/roles/nuage_master/tasks/main.yaml @@ -0,0 +1,34 @@ +--- +- name: Create directory /usr/share/nuagekubemon +  sudo: true +  file: path=/usr/share/nuagekubemon state=directory   + +- name: Create the log directory +  sudo: true +  file: path={{ nuagekubemon_log_dir }} state=directory + +- name: Install Nuage Kubemon +  sudo: true +  yum: name={{ nuage_kubemon_rpm }} state=present + +- name: Run the service account creation script +  sudo: true +  script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }} + +- name: Download the certs and keys +  sudo: true +  fetch: src={{ cert_output_dir }}/{{ item }} 
dest=/tmp/{{ item }} flat=yes +  with_items: +        - ca.crt +        - nuage.crt +        - nuage.key +        - nuage.kubeconfig  +  +- name: Create nuagekubemon.yaml +  sudo: true +  template: src=nuagekubemon.j2 dest=/usr/share/nuagekubemon/nuagekubemon.yaml owner=root mode=0644 +  notify: +    - restart master +    - restart master api +    - restart master controllers +    - restart nuagekubemon diff --git a/roles/nuage_master/templates/nuagekubemon.j2 b/roles/nuage_master/templates/nuagekubemon.j2 new file mode 100644 index 000000000..fb586bcee --- /dev/null +++ b/roles/nuage_master/templates/nuagekubemon.j2 @@ -0,0 +1,19 @@ +# .kubeconfig that includes the nuage service account +kubeConfig: {{ kube_config }} +# name of the nuage service account, or another account with 'cluster-reader' +# permissions +# Openshift master config file +openshiftMasterConfig: {{ master_config_yaml }}  +# URL of the VSD Architect +vsdApiUrl: {{ vsd_api_url }}  +# API version to query against.  Usually "v3_2" +vspVersion: {{ vsp_version }}  +# File containing a VSP license to install.  
Only necessary if no license has +# been installed on the VSD Architect before, only valid for standalone vsd install +# licenseFile: "/path/to/base_vsp_license.txt" +# Name of the enterprise in which pods will reside +enterpriseName: {{ enterprise }}  +# Name of the domain in which pods will reside +domainName: {{ domain }} +# Location where logs should be saved +log_dir: {{ nuagekubemon_log_dir }}  diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml new file mode 100644 index 000000000..db901fea6 --- /dev/null +++ b/roles/nuage_master/vars/main.yaml @@ -0,0 +1,7 @@ +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +ca_cert: "{{ openshift_master_config_dir }}/ca.crt" +admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig" +cert_output_dir: /usr/share/nuagekubemon +kube_config: /usr/share/nuagekubemon/nuage.kubeconfig +kubemon_yaml: /usr/share/nuagekubemon/nuagekubemon.yaml  +master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"  diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md new file mode 100644 index 000000000..02a3cbc77 --- /dev/null +++ b/roles/nuage_node/README.md @@ -0,0 +1,9 @@ +Nuage Node +========== + +Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node + +Requirements +------------ + +This role assumes it has been deployed on RHEL/Fedora diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml new file mode 100644 index 000000000..d82d4b67b --- /dev/null +++ b/roles/nuage_node/handlers/main.yaml @@ -0,0 +1,4 @@ +--- +- name: restart vrs +  sudo: true +  service: name=openvswitch state=restarted diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml new file mode 100644 index 000000000..e0117bf71 --- /dev/null +++ b/roles/nuage_node/tasks/main.yaml @@ -0,0 +1,37 @@ +--- +- name: Install Nuage VRS +  sudo: true +  yum: name={{ vrs_rpm }} state=present +   +- name: Set the 
uplink interface  +  sudo: true +  lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}' + +- name: Set the Active Controller  +  sudo: true +  lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}' + +- name: Set the Standby Controller  +  sudo: true +  lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}' +  when: vsc_standby_ip is defined + +- name: Install plugin rpm +  sudo: true +  yum: name={{ plugin_rpm }} state=present + +- name: Copy the certificates and keys +  sudo: true +  copy: src="/tmp/{{ item }}" dest="{{ vsp_k8s_dir }}/{{ item }}" +  with_items: +        - ca.crt +        - nuage.crt +        - nuage.key +        - nuage.kubeconfig  + +- name: Set the vsp-k8s.yaml  +  sudo: true +  template: src=vsp-k8s.j2 dest={{ vsp_k8s_yaml }} owner=root mode=0644  +  notify: +    - restart vrs +    - restart node  diff --git a/roles/nuage_node/templates/vsp-k8s.j2 b/roles/nuage_node/templates/vsp-k8s.j2 new file mode 100644 index 000000000..98d6c3a9c --- /dev/null +++ b/roles/nuage_node/templates/vsp-k8s.j2 @@ -0,0 +1,14 @@ +clientCert: {{ client_cert }}  +# The key to the certificate in clientCert above +clientKey: {{ client_key }} +# The certificate authority's certificate for the local kubelet.  Usually the +# same as the CA cert used to create the client Cert/Key pair. 
+CACert: {{ ca_cert }}  +# Name of the enterprise in which pods will reside +enterpriseName: {{ enterprise }}  +# Name of the domain in which pods will reside +domainName: {{ domain }} +# IP address and port number of master API server +masterApiServer: {{ api_server }} +# Bridge name for the docker bridge +dockerBridgeName: {{ docker_bridge }} diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml new file mode 100644 index 000000000..a6b7cf997 --- /dev/null +++ b/roles/nuage_node/vars/main.yaml @@ -0,0 +1,9 @@ +--- +vrs_config: /etc/default/openvswitch +vsp_k8s_dir: /usr/share/vsp-k8s +vsp_k8s_yaml: "{{ vsp_k8s_dir }}/vsp-k8s.yaml" +client_cert: "{{ vsp_k8s_dir }}/nuage.crt" +client_key: "{{ vsp_k8s_dir }}/nuage.key" +ca_cert: "{{ vsp_k8s_dir }}/ca.crt" +api_server: "{{ openshift_node_master_api_url }}" +docker_bridge: "docker0" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 3a2ccb59a..c9f745ed2 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -4,6 +4,14 @@    when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool  - fail: +   msg: Nuage sdn can not be used with openshift sdn +  when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool + +- fail: +   msg: Nuage sdn can not be used with flannel  +  when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool + +- fail:      msg: openshift_hostname must be 64 characters or less    when: openshift_hostname is defined and openshift_hostname | length > 64 @@ -23,6 +31,7 @@        deployment_type: "{{ openshift_deployment_type }}"        use_fluentd: "{{ openshift_use_fluentd | default(None) }}"        use_flannel: "{{ openshift_use_flannel | default(None) }}" +      use_nuage: "{{ openshift_use_nuage | default(None) }}"        use_manageiq: "{{ 
openshift_use_manageiq | default(None) }}"  - name: Install the base package for versioning diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 831d78b83..40e54d706 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -338,6 +338,23 @@ def set_flannel_facts_if_unset(facts):              facts['common']['use_flannel'] = use_flannel      return facts +def set_nuage_facts_if_unset(facts): +    """ Set nuage facts if not already present in facts dict +            dict: the facts dict updated with the nuage facts if +            missing +        Args: +            facts (dict): existing facts +        Returns: +            dict: the facts dict updated with the nuage +            facts if they were not already present + +    """ +    if 'common' in facts: +        if 'use_nuage' not in facts['common']: +            use_nuage = False +            facts['common']['use_nuage'] = use_nuage +    return facts +  def set_node_schedulability(facts):      """ Set schedulable facts if not already present in facts dict          Args: @@ -458,52 +475,68 @@ def set_url_facts_if_unset(facts):                    were not already present      """      if 'master' in facts: -        api_use_ssl = facts['master']['api_use_ssl'] -        api_port = facts['master']['api_port'] -        console_use_ssl = facts['master']['console_use_ssl'] -        console_port = facts['master']['console_port'] -        console_path = facts['master']['console_path'] -        etcd_use_ssl = facts['master']['etcd_use_ssl'] -        etcd_hosts = facts['master']['etcd_hosts'] -        etcd_port = facts['master']['etcd_port']          hostname = facts['common']['hostname'] -        public_hostname = facts['common']['public_hostname']          cluster_hostname = facts['master'].get('cluster_hostname')          cluster_public_hostname = facts['master'].get('cluster_public_hostname') + 
       public_hostname = facts['common']['public_hostname'] +        api_hostname = cluster_hostname if cluster_hostname else hostname +        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname +        console_path = facts['master']['console_path'] +        etcd_hosts = facts['master']['etcd_hosts'] + +        use_ssl = dict( +            api=facts['master']['api_use_ssl'], +            public_api=facts['master']['api_use_ssl'], +            loopback_api=facts['master']['api_use_ssl'], +            console=facts['master']['console_use_ssl'], +            public_console=facts['master']['console_use_ssl'], +            etcd=facts['master']['etcd_use_ssl'] +        ) + +        ports = dict( +            api=facts['master']['api_port'], +            public_api=facts['master']['api_port'], +            loopback_api=facts['master']['api_port'], +            console=facts['master']['console_port'], +            public_console=facts['master']['console_port'], +            etcd=facts['master']['etcd_port'], +        ) + +        etcd_urls = [] +        if etcd_hosts != '': +            facts['master']['etcd_port'] = ports['etcd'] +            facts['master']['embedded_etcd'] = False +            for host in etcd_hosts: +                etcd_urls.append(format_url(use_ssl['etcd'], host, +                                            ports['etcd'])) +        else: +            etcd_urls = [format_url(use_ssl['etcd'], hostname, +                                    ports['etcd'])] + +        facts['master'].setdefault('etcd_urls', etcd_urls) + +        prefix_hosts = [('api', api_hostname), +                        ('public_api', api_public_hostname), +                        ('loopback_api', hostname)] + +        for prefix, host in prefix_hosts: +            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], +                                                                   host, +                               
                                    ports[prefix])) + + +        r_lhn = "{0}:{1}".format(api_hostname, ports['api']).replace('.', '-') +        facts['master'].setdefault('loopback_cluster_name', r_lhn) +        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn)) +        facts['master'].setdefault('loopback_user', "system:openshift-master/{0}".format(r_lhn)) + +        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)] +        for prefix, host in prefix_hosts: +            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], +                                                                   host, +                                                                   ports[prefix], +                                                                   console_path)) -        if 'etcd_urls' not in facts['master']: -            etcd_urls = [] -            if etcd_hosts != '': -                facts['master']['etcd_port'] = etcd_port -                facts['master']['embedded_etcd'] = False -                for host in etcd_hosts: -                    etcd_urls.append(format_url(etcd_use_ssl, host, -                                                etcd_port)) -            else: -                etcd_urls = [format_url(etcd_use_ssl, hostname, -                                        etcd_port)] -            facts['master']['etcd_urls'] = etcd_urls -        if 'api_url' not in facts['master']: -            api_hostname = cluster_hostname if cluster_hostname else hostname -            facts['master']['api_url'] = format_url(api_use_ssl, api_hostname, -                                                    api_port) -        if 'public_api_url' not in facts['master']: -            api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname -            facts['master']['public_api_url'] = format_url(api_use_ssl, -                               
                            api_public_hostname, -                                                           api_port) -        if 'console_url' not in facts['master']: -            console_hostname = cluster_hostname if cluster_hostname else hostname -            facts['master']['console_url'] = format_url(console_use_ssl, -                                                        console_hostname, -                                                        console_port, -                                                        console_path) -        if 'public_console_url' not in facts['master']: -            console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname -            facts['master']['public_console_url'] = format_url(console_use_ssl, -                                                               console_public_hostname, -                                                               console_port, -                                                               console_path)      return facts  def set_aggregate_facts(facts): @@ -1031,6 +1064,10 @@ def set_container_facts_if_unset(facts):          if 'ovs_image' not in facts['node']:              facts['node']['ovs_image'] = ovs_image +    if facts['common']['is_containerized']: +        facts['common']['admin_binary'] = '/usr/local/bin/oadm' +        facts['common']['client_binary'] = '/usr/local/bin/oc' +      return facts @@ -1108,6 +1145,7 @@ class OpenShiftFacts(object):          facts = set_project_cfg_facts_if_unset(facts)          facts = set_fluentd_facts_if_unset(facts)          facts = set_flannel_facts_if_unset(facts) +        facts = set_nuage_facts_if_unset(facts)          facts = set_node_schedulability(facts)          facts = set_master_selectors(facts)          facts = set_metrics_facts_if_unset(facts) @@ -1149,7 +1187,7 @@ class OpenShiftFacts(object):          defaults['common'] = common          if 'master' in roles: -            master = 
dict(api_use_ssl=True, api_port='8443', +            master = dict(api_use_ssl=True, api_port='8443', controllers_port='8444',                            console_use_ssl=True, console_path='/console',                            console_port='8443', etcd_use_ssl=True, etcd_hosts='',                            etcd_port='4001', portal_net='172.30.0.0/16', diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 9766d01ae..1f74d851a 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -6,7 +6,9 @@ os_firewall_allow:  - service: etcd embedded    port: 4001/tcp  - service: api server https -  port: 8443/tcp +  port: "{{ openshift.master.api_port }}/tcp" +- service: api controllers https +  port: "{{ openshift.master.controllers_port }}/tcp"  - service: dns tcp    port: 53/tcp  - service: dns udp @@ -24,7 +26,5 @@ os_firewall_allow:  os_firewall_deny:  - service: api server http    port: 8080/tcp -- service: former web console port -  port: 8444/tcp  - service: former etcd peer port    port: 7001/tcp diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 150b76fc8..57b50bee4 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -42,7 +42,12 @@        public_console_url: "{{ openshift_master_public_console_url | default(None) }}"        logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}"        metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}" -      etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}" +      logout_url: "{{ openshift_master_logout_url | default(None) }}" +      extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}" +      extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}" +      extensions: "{{ openshift_master_extensions | default(None) }}" +      
oauth_template: "{{ openshift_master_oauth_template | default(None) }}" +      etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}"        etcd_port: "{{ openshift_master_etcd_port | default(None) }}"        etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"        etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" @@ -51,6 +56,7 @@        embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"        dns_port: "{{ openshift_master_dns_port | default(None) }}"        bind_addr: "{{ openshift_master_bind_addr | default(None) }}" +      pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"        portal_net: "{{ openshift_master_portal_net | default(None) }}"        session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"        session_name: "{{ openshift_master_session_name | default(None) }}" @@ -94,12 +100,12 @@  - name: Install Master docker service file    template:      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" -    src: master.docker.service.j2 +    src: docker/master.docker.service.j2    register: install_result    when: openshift.common.is_containerized | bool and not openshift_master_ha | bool -   +  - name: Create openshift.common.data_dir -  file:  +  file:      path: "{{ openshift.common.data_dir }}"      state: directory      mode: 0755 @@ -174,31 +180,42 @@    when: openshift.common.is_containerized | bool  # workaround for missing systemd unit files for controllers/api -- name: Create the api service file -  template: -    src: atomic-openshift-master-api{{ ha_suffix }}.service.j2 -    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-api.service" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the controllers service file +- name: Create the systemd unit files    template: -    src: atomic-openshift-master-controllers{{ ha_suffix 
}}.service.j2 -    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-controllers.service" +    src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2" +    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"    when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the api env file +  with_items: +  - api +  - controllers +  register: create_unit_files + +- command: systemctl daemon-reload +  when: create_unit_files | changed +# end workaround for missing systemd unit files + +- name: Create the master api service env file    template: -    src: atomic-openshift-master-api.j2 +    src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api -    force: no    when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- name: Create the controllers env file +  notify: +  - restart master api + +- name: Create the master controllers service env file    template: -    src: atomic-openshift-master-controllers.j2 +    src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -    force: no -  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -- command: systemctl daemon-reload    when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -# end workaround for missing systemd unit files +  notify: +  - restart master controllers + +- name: Create the master service env file +  template: +    src: "atomic-openshift-master.j2" +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master +  notify: +  - restart master  - name: Create session secrets file    template: @@ -223,47 +240,36 @@    - restart master api    - restart master controllers -- name: Configure master settings -  lineinfile: -    dest: 
/etc/sysconfig/{{ openshift.common.service_type }}-master -    regexp: "{{ item.regex }}" -    line: "{{ item.line }}" -    create: yes -  with_items: -    - regex: '^OPTIONS=' -      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}" -    - regex: '^CONFIG_FILE=' -      line: "CONFIG_FILE={{ openshift_master_config_file }}" -  notify: -  - restart master - -- name: Configure master api settings -  lineinfile: -    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api -    regexp: "{{ item.regex }}" -    line: "{{ item.line }}" -  with_items: -    - regex: '^OPTIONS=' -      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443" -    - regex: '^CONFIG_FILE=' -      line: "CONFIG_FILE={{ openshift_master_config_file }}" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -  notify: -  - restart master api - -- name: Configure master controller settings -  lineinfile: -    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -    regexp: "{{ item.regex }}" -    line: "{{ item.line }}" -  with_items: -    - regex: '^OPTIONS=' -      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444" -    - regex: '^CONFIG_FILE=' -      line: "CONFIG_FILE={{ openshift_master_config_file }}" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" -  notify: -  - restart master controllers +- name: Test local loopback context +  command: > +    {{ openshift.common.client_binary }} config view +    --config={{ openshift_master_loopback_config }} +  changed_when: false +  register: loopback_config + +- command: > +    {{ openshift.common.client_binary }} config set-cluster +    --certificate-authority={{ openshift_master_config_dir }}/ca.crt +    --embed-certs=true --server={{ openshift.master.loopback_api_url }} +    {{ openshift.master.loopback_cluster_name 
}} +    --config={{ openshift_master_loopback_config }} +  when: loopback_context_string not in loopback_config.stdout +  register: set_loopback_cluster + +- command: > +    {{ openshift.common.client_binary }} config set-context +    --cluster={{ openshift.master.loopback_cluster_name }} +    --namespace=default --user={{ openshift.master.loopback_user }} +    {{ openshift.master.loopback_context_name }} +    --config={{ openshift_master_loopback_config }} +  when: set_loopback_cluster | changed +  register: set_loopback_context + +- command: > +    {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} +    --config={{ openshift_master_loopback_config }} +  when: set_loopback_context | changed +  register: set_current_context  - name: Start and enable master    service: name={{ openshift.common.service_type }}-master enabled=yes state=started diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 205934248..81bae5470 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -1,5 +1,5 @@ -OPTIONS= -CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml +OPTIONS=--loglevel={{ openshift.master.debug_level }} +CONFIG_FILE={{ openshift_master_config_file }}  # Proxy configuration  # Origin uses standard HTTP_PROXY environment variables. Be sure to set diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 new file mode 120000 index 000000000..4bb7095ee --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 @@ -0,0 +1 @@ +../native-cluster/atomic-openshift-master-api.j2
\ No newline at end of file diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 936c39edf..a935b82f6 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -23,4 +23,4 @@ Restart=always  [Install]  WantedBy=multi-user.target -WantedBy={{ openshift.common.service_type }}-node.service
\ No newline at end of file +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 new file mode 120000 index 000000000..8714ebbae --- /dev/null +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 @@ -0,0 +1 @@ +../native-cluster/atomic-openshift-master-controllers.j2
\ No newline at end of file diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 6ba7d6e2a..6ba7d6e2a 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 diff --git a/roles/openshift_master/templates/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2 index 23781a313..23781a313 100644 --- a/roles/openshift_master/templates/master.docker.service.j2 +++ b/roles/openshift_master/templates/docker/master.docker.service.j2 diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index dfcaf1953..1eeab46fe 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -5,7 +5,7 @@ apiLevels:  - v1  apiVersion: v1  assetConfig: -  logoutURL: "" +  logoutURL: "{{ openshift.master.logout_url | default('') }}"    masterPublicURL: {{ openshift.master.public_api_url }}    publicURL: {{ openshift.master.public_console_url }}/  {% if 'logging_public_url' in openshift.master %} @@ -14,6 +14,15 @@ assetConfig:  {% if 'metrics_public_url' in openshift.master %}    metricsPublicURL: {{ openshift.master.metrics_public_url }}  {% endif %} +{% if 'extension_scripts' in openshift.master %} +  extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }} +{% endif %} +{% if 'extension_stylesheets' in openshift.master %} +  extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }} +{% endif %} +{% if 'extensions' in openshift.master %} +  extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }} +{% endif %}    servingInfo:      bindAddress: {{ openshift.master.bind_addr }}:{{ 
openshift.master.console_port }}      bindNetwork: tcp4 @@ -91,7 +100,7 @@ kubernetesMasterConfig:    controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}    masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}    masterIP: {{ openshift.common.ip }} -  podEvictionTimeout: "" +  podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}    proxyClientInfo:      certFile: master.proxy-client.crt      keyFile: master.proxy-client.key @@ -108,12 +117,16 @@ masterPublicURL: {{ openshift.master.public_api_url }}  networkConfig:    clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} -{% if openshift.common.use_openshift_sdn %} +{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}    networkPluginName: {{ openshift.common.sdn_network_plugin_name }}  {% endif %}  # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet    serviceNetworkCIDR: {{ openshift.master.portal_net }}  oauthConfig: +{% if 'oauth_template' in openshift.master %} +  templates: +    login: {{ openshift.master.oauth_template }} +{% endif %}    assetPublicURL: {{ openshift.master.public_console_url }}/    grantConfig:      method: {{ openshift.master.oauth_grant_method }} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 new file mode 100644 index 000000000..48bfa5f04 --- /dev/null +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -0,0 +1,9 @@ +OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} +CONFIG_FILE={{ 
openshift_master_config_file }} + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index ba19fb348..ba19fb348 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 205934248..cdc56eece 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -1,5 +1,5 @@ -OPTIONS= -CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml +OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }} +CONFIG_FILE={{ openshift_master_config_file }}  # Proxy configuration  # Origin uses standard HTTP_PROXY environment variables. 
Be sure to set diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index e6e97b24f..e6e97b24f 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index 534465451..fe88c3c16 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -1,11 +1,16 @@  ---  openshift_master_config_dir: "{{ openshift.common.config_base }}/master"  openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml" +openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" +loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"  openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"  openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"  openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"  openshift_version: "{{ openshift_pkg_version | default('') }}" +ha_svc_template_path: "{{ 'docker-cluster' if openshift.common.is_containerized | bool else 'native-cluster' }}" +ha_svc_svc_dir: "{{ '/etc/systemd/system' if openshift.common.is_containerized | bool else '/usr/lib/systemd/system' }}" +  openshift_master_valid_grant_methods:  - auto  - prompt diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml index 7ab9afb51..1b94598dd 100644 --- a/roles/openshift_master_cluster/tasks/configure.yml +++ b/roles/openshift_master_cluster/tasks/configure.yml @@ -34,11 +34,10 @@  - name: Disable stonith    command: pcs property set stonith-enabled=false -# TODO: handle 
case where api port is not 8443  - name: Wait for the clustered master service to be available    wait_for:      host: "{{ openshift_master_cluster_vip }}" -    port: 8443 +    port: "{{ openshift.master.api_port }}"      state: started      timeout: 180      delay: 90 diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index cbe811f83..44065f4bd 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -20,7 +20,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}  # deprecates networkPluginName above. The two should match.  networkConfig:     mtu: {{ openshift.node.sdn_mtu }} -{% if openshift.common.use_openshift_sdn %} +{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}     networkPluginName: {{ openshift.common.sdn_network_plugin_name }}  {% endif %}  {% if openshift.node.set_node_ip | bool %} diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml index bcf1d9a34..e3176e611 100644 --- a/roles/os_firewall/defaults/main.yml +++ b/roles/os_firewall/defaults/main.yml @@ -1,2 +1,3 @@  --- +os_firewall_enabled: True  os_firewall_use_firewalld: True diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml index ad89ef97c..076e5e311 100644 --- a/roles/os_firewall/tasks/main.yml +++ b/roles/os_firewall/tasks/main.yml @@ -1,6 +1,6 @@  ---  - include: firewall/firewalld.yml -  when: os_firewall_use_firewalld +  when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool  - include: firewall/iptables.yml -  when: not os_firewall_use_firewalld +  when: os_firewall_enabled | bool and not os_firewall_use_firewalld | bool diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index 7552086d4..1c8d88854 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -41,6 +41,10 @@    tags:    - zagg_server +- 
include_vars: template_config_loop.yml +  tags: +  - config_loop +  - name: Include Template Heartbeat    include: ../../lib_zabbix/tasks/create_template.yml    vars: @@ -150,3 +154,13 @@      password: "{{ ozb_password }}"    tags:    - zagg_server + +- name: Include Template Config Loop +  include: ../../lib_zabbix/tasks/create_template.yml +  vars: +    template: "{{ g_template_config_loop }}" +    server: "{{ ozb_server }}" +    user: "{{ ozb_user }}" +    password: "{{ ozb_password }}" +  tags: +  - config_loop diff --git a/roles/os_zabbix/vars/template_config_loop.yml b/roles/os_zabbix/vars/template_config_loop.yml new file mode 100644 index 000000000..823da1868 --- /dev/null +++ b/roles/os_zabbix/vars/template_config_loop.yml @@ -0,0 +1,14 @@ +--- +g_template_config_loop: +  name: Template Config Loop +  zitems: +  - key: config_loop.run.exit_code +    applications: +    - Config Loop +    value_type: int + +  ztriggers: +  - name: 'config_loop.run.exit_code not zero on {HOST.NAME}' +    expression: '{Template Config Loop:config_loop.run.exit_code.min(#2)}>0' +    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_config_loop.asciidoc' +    priority: average diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 index 753cad69f..31f7d4caa 100644 --- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 +++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 @@ -47,6 +47,7 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }}             -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }}                                    \             -e OSO_CLUSTER_GROUP={{ cluster_group }}                                          \             -e OSO_CLUSTER_ID={{ oo_clusterid }}                                              \ +           -e OSO_ENVIRONMENT={{ oo_environment }}                     
                      \             -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }}          \             -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}  \             -v /etc/localtime:/etc/localtime                                                  \  | 
