Diffstat (limited to 'playbooks/openshift-master')
21 files changed, 814 insertions, 0 deletions
diff --git a/playbooks/openshift-master/additional_config.yml b/playbooks/openshift-master/additional_config.yml
new file mode 100644
index 000000000..e43e9e002
--- /dev/null
+++ b/playbooks/openshift-master/additional_config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/additional_config.yml
diff --git a/playbooks/openshift-master/certificates.yml b/playbooks/openshift-master/certificates.yml
new file mode 100644
index 000000000..0384877d9
--- /dev/null
+++ b/playbooks/openshift-master/certificates.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/certificates.yml
diff --git a/playbooks/openshift-master/config.yml b/playbooks/openshift-master/config.yml
new file mode 100644
index 000000000..8ee57ce8d
--- /dev/null
+++ b/playbooks/openshift-master/config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/config.yml
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
new file mode 100644
index 000000000..32f638d42
--- /dev/null
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -0,0 +1,52 @@
+---
+- name: Master Additional Install Checkpoint Start
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Master Additional install 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_master_additional:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Additional master configuration
+  hosts: oo_first_master
+  vars:
+    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+    etcd_urls: "{{ openshift.master.etcd_urls }}"
+    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
+    omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
+  roles:
+  - role: openshift_master_cluster
+    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+  - role: openshift_project_request_template
+    when: openshift_project_request_template_manage
+  - role: openshift_examples
+    when: openshift_install_examples | default(true, true) | bool
+    registry_url: "{{ openshift.master.registry_url }}"
+  - role: openshift_hosted_templates
+    registry_url: "{{ openshift.master.registry_url }}"
+  - role: openshift_manageiq
+    when: openshift_use_manageiq | default(true) | bool
+  - role: cockpit
+    when:
+    - not openshift.common.is_atomic | bool
+    - deployment_type == 'openshift-enterprise'
+    - osm_use_cockpit is undefined or osm_use_cockpit | bool
+    - openshift.common.deployment_subtype != 'registry'
+  - role: flannel_register
+    when: openshift_use_flannel | default(false) | bool
+
+- name: Master Additional Install Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Master Additional install 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_master_additional:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-master/private/certificates.yml b/playbooks/openshift-master/private/certificates.yml
new file mode 100644
index 000000000..f6afbc36f
--- /dev/null
+++ b/playbooks/openshift-master/private/certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create OpenShift certificates for master hosts
+  hosts: oo_masters_to_config
+  vars:
+    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+  roles:
+  - role: openshift_master_facts
+  - role: openshift_named_certificates
+  - role: openshift_ca
+  - role: openshift_master_certificates
+    openshift_master_etcd_hosts: "{{ hostvars
+                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+                                     | oo_collect('openshift.common.hostname')
+                                     | default(none, true) }}"
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
new file mode 100644
index 000000000..6b0fd6b7c
--- /dev/null
+++ b/playbooks/openshift-master/private/config.yml
@@ -0,0 +1,252 @@
+---
+- name: Master Install Checkpoint Start
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Master install 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_master:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- include: certificates.yml
+
+- name: Disable excluders
+  hosts: oo_masters_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_excluder
+    r_openshift_excluder_action: disable
+    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Gather and set facts for master hosts
+  hosts: oo_masters_to_config
+  pre_tasks:
+  # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
+  #
+  # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
+  # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
+  # masters, or absent if such is the case.
+  - name: Detect if this host is a new master in a scale up
+    set_fact:
+      g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
+
+  - name: Scaleup Detection
+    debug:
+      var: g_openshift_master_is_scaleup
+
+  - name: Check for RPM generated config marker file .config_managed
+    stat:
+      path: /etc/origin/.config_managed
+    register: rpmgenerated_config
+
+  - name: Remove RPM generated config files if present
+    file:
+      path: "/etc/origin/{{ item }}"
+      state: absent
+    when:
+    - rpmgenerated_config.stat.exists == true
+    - deployment_type == 'openshift-enterprise'
+    with_items:
+    - master
+    - node
+    - .config_managed
+
+  - set_fact:
+      openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+      openshift_master_etcd_hosts: "{{ hostvars
+                                       | oo_select_keys(groups['oo_etcd_to_config']
+                                                        | default([]))
+                                       | oo_collect('openshift.common.hostname')
+                                       | default(none, true) }}"
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        api_port: "{{ openshift_master_api_port | default(None) }}"
+        api_url: "{{ openshift_master_api_url | default(None) }}"
+        api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+        controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
+        public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+        console_path: "{{ openshift_master_console_path | default(None) }}"
+        console_port: "{{ openshift_master_console_port | default(None) }}"
+        console_url: "{{ openshift_master_console_url | default(None) }}"
+        console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+        public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+
+- name: Inspect state of first master config settings
+  hosts: oo_first_master
+  roles:
+  - role: openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+        session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+  - name: Check for existing configuration
+    stat:
+      path: /etc/origin/master/master-config.yaml
+    register: master_config_stat
+
+  - name: Set clean install fact
+    set_fact:
+      l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+  - name: Determine if etcd3 storage is in use
+    command: grep  -Pzo  "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+    register: etcd3_grep
+    failed_when: false
+    changed_when: false
+
+  - name: Set etcd3 fact
+    set_fact:
+      l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
+
+  - name: Check if atomic-openshift-master sysconfig exists yet
+    stat:
+      path: /etc/sysconfig/atomic-openshift-master
+    register: l_aom_exists
+
+  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
+    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
+    register: l_default_registry_defined
+    when: l_aom_exists.stat.exists | bool
+
+  - name: Check if atomic-openshift-master-api sysconfig exists yet
+    stat:
+      path: /etc/sysconfig/atomic-openshift-master-api
+    register: l_aom_api_exists
+
+  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
+    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
+    register: l_default_registry_defined_api
+    when: l_aom_api_exists.stat.exists | bool
+
+  - name: Check if atomic-openshift-master-controllers sysconfig exists yet
+    stat:
+      path: /etc/sysconfig/atomic-openshift-master-controllers
+    register: l_aom_controllers_exists
+
+  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
+    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
+    register: l_default_registry_defined_controllers
+    when: l_aom_controllers_exists.stat.exists | bool
+
+  - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
+    set_fact:
+      l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
+      l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
+      l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
+
+- name: Generate master session secrets
+  hosts: oo_first_master
+  vars:
+    g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+    g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+    g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+  roles:
+  - role: openshift_facts
+  tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        session_auth_secrets: "{{ g_session_auth_secrets }}"
+        session_encryption_secrets: "{{ g_session_encryption_secrets }}"
+    when: not g_session_secrets_present | bool
+
+- name: Configure masters
+  hosts: oo_masters_to_config
+  any_errors_fatal: true
+  vars:
+    openshift_master_ha: "{{ openshift.master.ha }}"
+    openshift_master_count: "{{ openshift.master.master_count }}"
+    openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
+    openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
+    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+    openshift_master_etcd_hosts: "{{ hostvars
+                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+                                     | oo_collect('openshift.common.hostname')
+                                     | default(none, true) }}"
+    openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+                                                | oo_collect('openshift.common.ip') | default([]) | join(',')
+                                                }}"
+  roles:
+  - role: os_firewall
+  - role: openshift_master_facts
+  - role: openshift_hosted_facts
+  - role: openshift_clock
+  - role: openshift_cloud_provider
+  - role: openshift_builddefaults
+  - role: openshift_buildoverrides
+  - role: nickhammond.logrotate
+  - role: contiv
+    contiv_role: netmaster
+    when: openshift_use_contiv | default(False) | bool
+  - role: openshift_master
+    openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
+    r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+    r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
+    openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
+    openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
+    openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
+    openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+  - role: tuned
+  - role: nuage_ca
+    when: openshift_use_nuage | default(false) | bool
+  - role: nuage_common
+    when: openshift_use_nuage | default(false) | bool
+  - role: nuage_master
+    when: openshift_use_nuage | default(false) | bool
+  - role: calico_master
+    when: openshift_use_calico | default(false) | bool
+  tasks:
+  - include_role:
+      name: kuryr
+      tasks_from: master
+    when: openshift_use_kuryr | default(false) | bool
+
+  - name: Setup the node group config maps
+    include_role:
+      name: openshift_node_group
+    when: openshift_master_bootstrap_enabled | default(false) | bool
+    run_once: True
+
+  post_tasks:
+  - name: Create group for deployment type
+    group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+    changed_when: False
+
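# The group_by post_task directly above creates an ad-hoc inventory group at
# runtime. A later play in the same run could then target that group by name;
# a minimal sketch (the 'origin' suffix is a hypothetical deployment_type
# value, not something this diff defines):
- name: Act on masters grouped by deployment type (sketch)
  hosts: oo_masters_deployment_type_origin
  gather_facts: false
  tasks:
  - debug:
      msg: "This master was grouped dynamically during the 'Configure masters' play."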
+- name: Configure API Aggregation on masters
+  hosts: oo_masters
+  serial: 1
+  tasks:
+  - include: tasks/wire_aggregator.yml
+
+- name: Re-enable excluder if it was previously enabled
+  hosts: oo_masters_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_excluder
+    r_openshift_excluder_action: enable
+    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Master Install Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Master install 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_master:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/openshift-master/private/filter_plugins b/playbooks/openshift-master/private/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/openshift-master/private/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/library b/playbooks/openshift-master/private/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/openshift-master/private/library
@@ -0,0 +1 @@
+../../../library/
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/lookup_plugins b/playbooks/openshift-master/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-master/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
new file mode 100644
index 000000000..4d73b8124
--- /dev/null
+++ b/playbooks/openshift-master/private/restart.yml
@@ -0,0 +1,19 @@
+---
+- include: validate_restart.yml
+
+- name: Restart masters
+  hosts: oo_masters_to_config
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  serial: 1
+  handlers:
+  - include: ../../../roles/openshift_master/handlers/main.yml
+    static: yes
+  roles:
+  - openshift_facts
+  post_tasks:
+  - include: restart_hosts.yml
+    when: openshift_rolling_restart_mode | default('services') == 'system'
+
+  - include: restart_services.yml
+    when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/restart_hosts.yml b/playbooks/openshift-master/private/restart_hosts.yml
new file mode 100644
index 000000000..a5dbe0590
--- /dev/null
+++ b/playbooks/openshift-master/private/restart_hosts.yml
@@ -0,0 +1,40 @@
+---
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+
+# WARNING: This process is riddled with weird behavior.
+
+# Workaround for https://github.com/ansible/ansible/issues/21269
+- set_fact:
+    wait_for_host: "{{ ansible_host }}"
+
+# Ansible's blog documents this *without* the port, which now appears to
+# just wait until the timeout value and then proceed without checking
+# anything, so the port is required.
+#
+# However, neither ansible_ssh_port nor ansible_port is reliably defined;
+# they are likely present only when overridden. Assume a default of 22.
+- name: Wait for master to restart
+  local_action:
+    module: wait_for
+      host="{{ wait_for_host }}"
+      state=started
+      delay=10
+      timeout=600
+      port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
+  become: no
+
+# Now that ssh is back up we can wait for API on the remote system,
+# avoiding some potential connection issues from local system:
+- name: Wait for master API to come back online
+  wait_for:
+    host: "{{ openshift.common.hostname }}"
+    state: started
+    delay: 10
+    port: "{{ openshift.master.api_port }}"
+    timeout: 600
diff --git a/playbooks/openshift-master/private/restart_services.yml b/playbooks/openshift-master/private/restart_services.yml
new file mode 100644
index 000000000..4e1b3a3be
--- /dev/null
+++ b/playbooks/openshift-master/private/restart_services.yml
@@ -0,0 +1,4 @@
+---
+- include_role:
+    name: openshift_master
+    tasks_from: restart.yml
diff --git a/playbooks/openshift-master/private/revert-client-ca.yml b/playbooks/openshift-master/private/revert-client-ca.yml
new file mode 100644
index 000000000..9ae23bf5b
--- /dev/null
+++ b/playbooks/openshift-master/private/revert-client-ca.yml
@@ -0,0 +1,17 @@
+---
+- name: Set servingInfo.clientCA = ca.crt in master config
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Read master config
+    slurp:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_output
+
+  # servingInfo.clientCA may be set as the client-ca-bundle.crt from
+  # CA redeployment and this task reverts that change.
+  - name: Set servingInfo.clientCA = ca.crt in master config
+    modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: servingInfo.clientCA
+      yaml_value: ca.crt
+    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
diff --git a/playbooks/openshift-master/private/roles b/playbooks/openshift-master/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-master/private/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
new file mode 100644
index 000000000..021399965
--- /dev/null
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -0,0 +1,57 @@
+---
+- name: Update master count
+  hosts: oo_masters:!oo_masters_to_config
+  serial: 1
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+  - name: Update master count
+    modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: 'kubernetesMasterConfig.masterCount'
+      yaml_value: "{{ openshift.master.master_count }}"
+    notify:
+    - restart master api
+    - restart master controllers
+  handlers:
+  - name: restart master api
+    service: name={{ openshift.common.service_type }}-master-api state=restarted
+    notify: verify api server
+  # We retry the controllers because the API may not be 100% initialized yet.
+  - name: restart master controllers
+    command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+    retries: 3
+    delay: 5
+    register: result
+    until: result.rc == 0
+  - name: verify api server
+    command: >
+      curl --silent --tlsv1.2
+      --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+      {{ openshift.master.api_url }}/healthz/ready
+    args:
+      # Disables the following warning:
+      # Consider using get_url or uri module rather than running curl
+      warn: no
+    register: api_available_output
+    until: api_available_output.stdout == 'ok'
+    retries: 120
+    delay: 1
+    changed_when: false
+
+- include: set_network_facts.yml
+
+- include: ../../openshift-etcd/private/certificates.yml
+
+- include: config.yml
+
+- include: ../../openshift-loadbalancer/private/config.yml
+
+- include: ../../openshift-node/private/certificates.yml
+
+- include: ../../openshift-node/private/config.yml
diff --git a/playbooks/openshift-master/private/set_network_facts.yml b/playbooks/openshift-master/private/set_network_facts.yml
new file mode 100644
index 000000000..9a6cf26fc
--- /dev/null
+++ b/playbooks/openshift-master/private/set_network_facts.yml
@@ -0,0 +1,34 @@
+---
+- name: Read first master's config
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - stat:
+      path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_stat
+  - slurp:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_slurp
+
+- name: Set network facts for masters
+  hosts: oo_masters_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_facts
+  post_tasks:
+  - block:
+    - set_fact:
+        osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+      when: osm_cluster_network_cidr is not defined
+    - set_fact:
+        osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+      when: osm_host_subnet_length is not defined
+    - set_fact:
+        openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+      when: openshift_portal_net is not defined
+    - openshift_facts:
+        role: common
+        local_facts:
+          portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+    when:
+    - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..97acc5d5d
--- /dev/null
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -0,0 +1,216 @@
+---
+- name: Make temp cert dir
+  command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+  register: certtemp
+  changed_when: False
+
+- name: Check for First Master Aggregator Signer cert
+  stat:
+    path: /etc/origin/master/front-proxy-ca.crt
+  register: first_proxy_ca_crt
+  changed_when: false
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Check for First Master Aggregator Signer key
+  stat:
+    path: /etc/origin/master/front-proxy-ca.key
+  register: first_proxy_ca_key
+  changed_when: false
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+
+# TODO: this currently has a bug where hostnames are required
+- name: Creating First Master Aggregator signer certs
+  command: >
+    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+    --cert=/etc/origin/master/front-proxy-ca.crt
+    --key=/etc/origin/master/front-proxy-ca.key
+    --serial=/etc/origin/master/ca.serial.txt
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  when:
+  - not first_proxy_ca_crt.stat.exists
+  - not first_proxy_ca_key.stat.exists
+
+- name: Check for Aggregator Signer cert
+  stat:
+    path: /etc/origin/master/front-proxy-ca.crt
+  register: proxy_ca_crt
+  changed_when: false
+
+- name: Check for Aggregator Signer key
+  stat:
+    path: /etc/origin/master/front-proxy-ca.key
+  register: proxy_ca_key
+  changed_when: false
+
+- name: Copy Aggregator Signer certs from first master
+  fetch:
+    src: "/etc/origin/master/{{ item }}"
+    dest: "{{ certtemp.stdout }}/{{ item }}"
+    flat: yes
+  with_items:
+  - front-proxy-ca.crt
+  - front-proxy-ca.key
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  when:
+  - not proxy_ca_key.stat.exists
+  - not proxy_ca_crt.stat.exists
+
+- name: Copy Aggregator Signer certs to host
+  copy:
+    src: "{{ certtemp.stdout }}/{{ item }}"
+    dest: "/etc/origin/master/{{ item }}"
+  with_items:
+  - front-proxy-ca.crt
+  - front-proxy-ca.key
+  when:
+  - not proxy_ca_key.stat.exists
+  - not proxy_ca_crt.stat.exists
+
+#  oc_adm_ca_server_cert:
+#    cert: /etc/origin/master/front-proxy-ca.crt
+#    key: /etc/origin/master/front-proxy-ca.key
+
+- name: Check for first master api-client config
+  stat:
+    path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+  register: first_front_proxy_kubeconfig
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate.  Generate the aggregator
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
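# In other words, assuming the usual `oc adm create-api-client-config`
# behavior: the command writes aggregator-front-proxy.{crt,key,kubeconfig}
# plus a ca.crt for the signer into --client-dir, so pointing --client-dir
# straight at /etc/origin/master would clobber the cluster CA's ca.crt.
# That is why the block below generates into {{ certtemp.stdout }} first.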
+- block:
+  - name: Create first master api-client config for Aggregator
+    command: >
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+      --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+      --signer-cert=/etc/origin/master/front-proxy-ca.crt
+      --signer-key=/etc/origin/master/front-proxy-ca.key
+      --user aggregator-front-proxy
+      --client-dir={{ certtemp.stdout }}
+      --signer-serial=/etc/origin/master/ca.serial.txt
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - name: Copy first master api-client config for Aggregator
+    copy:
+      src: "{{ certtemp.stdout }}/{{ item }}"
+      dest: "/etc/origin/master/"
+      remote_src: true
+    with_items:
+    - aggregator-front-proxy.crt
+    - aggregator-front-proxy.key
+    - aggregator-front-proxy.kubeconfig
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  when:
+  - not first_front_proxy_kubeconfig.stat.exists
+
+- name: Check for api-client config
+  stat:
+    path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+  register: front_proxy_kubeconfig
+
+- name: Copy api-client config from first master
+  fetch:
+    src: "/etc/origin/master/{{ item }}"
+    dest: "{{ certtemp.stdout }}/{{ item }}"
+    flat: yes
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  with_items:
+  - aggregator-front-proxy.crt
+  - aggregator-front-proxy.key
+  - aggregator-front-proxy.kubeconfig
+  when:
+  - not front_proxy_kubeconfig.stat.exists
+
+- name: Copy api-client config to host
+  copy:
+    src: "{{ certtemp.stdout }}/{{ item }}"
+    dest: "/etc/origin/master/{{ item }}"
+  with_items:
+  - aggregator-front-proxy.crt
+  - aggregator-front-proxy.key
+  - aggregator-front-proxy.kubeconfig
+  when:
+  - not front_proxy_kubeconfig.stat.exists
+
+- name: Delete temp directory
+  file:
+    name: "{{ certtemp.stdout }}"
+    state: absent
+  changed_when: False
+
+- name: Setup extension file for service console UI
+  template:
+    src: ../templates/openshift-ansible-catalog-console.js
+    dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- name: Update master config
+  yedit:
+    state: present
+    src: /etc/origin/master/master-config.yaml
+    edits:
+    - key: aggregatorConfig.proxyClientInfo.certFile
+      value: aggregator-front-proxy.crt
+    - key: aggregatorConfig.proxyClientInfo.keyFile
+      value: aggregator-front-proxy.key
+    - key: authConfig.requestHeader.clientCA
+      value: front-proxy-ca.crt
+    - key: authConfig.requestHeader.clientCommonNames
+      value: [aggregator-front-proxy]
+    - key: authConfig.requestHeader.usernameHeaders
+      value: [X-Remote-User]
+    - key: authConfig.requestHeader.groupHeaders
+      value: [X-Remote-Group]
+    - key: authConfig.requestHeader.extraHeaderPrefixes
+      value: [X-Remote-Extra-]
+    - key: assetConfig.extensionScripts
+      value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+    - key: kubernetesMasterConfig.apiServerArguments.runtime-config
+      value: [apis/settings.k8s.io/v1alpha1=true]
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+      value: DefaultAdmissionConfig
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+      value: v1
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+      value: false
+  register: yedit_output
+
+# restart master serially here
+- name: restart master api
+  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+  when:
+  - yedit_output.changed
+  - openshift.master.cluster_method == 'native'
+
+# We retry the controllers because the API may not be 100% initialized yet.
+- name: restart master controllers
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
+  when:
+  - yedit_output.changed
+  - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl --silent --tlsv1.2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  args:
+    # Disables the following warning:
+    # Consider using get_url or uri module rather than running curl
+    warn: no
+  register: api_available_output
+  until: api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  changed_when: false
+  when:
+  - yedit_output.changed
diff --git a/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..fd02325ba
--- /dev/null
+++ b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
new file mode 100644
index 000000000..5dbb21502
--- /dev/null
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -0,0 +1,65 @@
+---
+- name: Validate configuration for rolling restart
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - fail:
+      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+    - role: master
+      local_facts:
+        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# We create a temp file on localhost, then check each system that will be
+# rebooted to see if that file exists. If it does, we know we're running
+# Ansible on a machine that is about to reboot, and we need to error out.
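# Put differently: mktemp creates a path that exists only on the control
# machine; if the stat below finds that same path on a master, that master
# *is* the control machine, and rebooting it would kill this playbook run.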
+- name: Create temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - local_action: command mktemp
+    register: mktemp
+    changed_when: false
+
+- name: Check if temp file exists on any masters
+  hosts: oo_masters_to_config
+  tasks:
+  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+    register: exists
+    changed_when: false
+
+- name: Cleanup temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+    changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+  hosts: oo_masters_to_config
+  tasks:
+  - pause:
+      prompt: >
+        Warning: Running playbook from a host that will be restarted!
+        Press CTRL+C and A to abort playbook execution. You may
+        continue by pressing ENTER but the playbook will stop
+        executing after this system has been restarted and services
+        must be verified manually. To only restart services, set
+        openshift_rolling_restart_mode=services in host
+        inventory and relaunch the playbook.
+    when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+  - set_fact:
+      current_host: "{{ exists.stat.exists }}"
+    when: openshift.common.rolling_restart_mode == 'system'
diff --git a/playbooks/openshift-master/restart.yml b/playbooks/openshift-master/restart.yml
new file mode 100644
index 000000000..5e28e274e
--- /dev/null
+++ b/playbooks/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/restart.yml
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
new file mode 100644
index 000000000..aa0dd8094
--- /dev/null
+++ b/playbooks/openshift-master/scaleup.yml
@@ -0,0 +1,23 @@
+---
+- include: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_masters or new_nodes
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        Detected no new_masters or no new_nodes in inventory. Please
+        add hosts to the new_masters and new_nodes host groups to add
+        masters.
+    when:
+    - g_new_master_hosts | default([]) | length == 0
+    - g_new_node_hosts | default([]) | length == 0
+
+# Need a better way to do the above check for node without
+# running evaluate_groups and init/main.yml
+- include: ../init/main.yml
+
+- include: private/scaleup.yml

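The "verify api server" handler and the "Verify API Server" task above shell out to curl because, as their inline comments note, the uri module requires python-httplib2 on the host and wait_for cannot inspect health output. Where that dependency is acceptable, a rough equivalent using the stock uri module might look like the sketch below; note that the curl version pins the CA bundle via --cacert, which this simplified sketch skips by disabling certificate validation:

- name: Verify API server readiness (sketch using the uri module)
  uri:
    url: "{{ openshift.master.api_url }}/healthz/ready"
    return_content: yes
    # The curl tasks pin the CA with --cacert; validation is relaxed here
    # only to keep the sketch self-contained.
    validate_certs: no
  register: api_ready
  failed_when: false
  until: api_ready.status | default(-1) == 200 and api_ready.content | default('') == 'ok'
  retries: 120
  delay: 1
  changed_when: false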