Diffstat (limited to 'playbooks')
19 files changed, 491 insertions, 148 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
 # This deletes *ALL* Docker images, and uninstalls OpenShift and
 # Atomic Enterprise RPMs.  It is primarily intended for use
 # with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml

 - hosts:
     - OSEv3:children
@@ -8,59 +11,6 @@
   sudo: yes

   tasks:
-    - service: name={{ item }} state=stopped
-      with_items:
-        - openvswitch
-        - origin-master
-        - origin-node
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - openshift-master
-        - openshift-node
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - etcd
-
-    - yum: name={{ item }} state=absent
-      with_items:
-        - openvswitch
-        - etcd
-        - origin
-        - origin-master
-        - origin-node
-        - origin-sdn-ovs
-        - tuned-profiles-origin-node
-        - atomic-openshift
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - atomic-openshift-sdn-ovs
-        - tuned-profiles-atomic-openshift-node
-        - atomic-enterprise
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - atomic-enterprise-sdn-ovs
-        - tuned-profiles-atomic-enterprise-node
-        - openshift
-        - openshift-master
-        - openshift-node
-        - openshift-sdn-ovs
-        - tuned-profiles-openshift-node
-
-    - shell: systemctl reset-failed
-      changed_when: False
-
-    - shell: systemctl daemon-reload
-      changed_when: False
-
-    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
     - shell: docker ps -a -q | xargs docker stop
       changed_when: False
       failed_when: False
@@ -73,27 +23,6 @@
       changed_when: False
       failed_when: False

-    - file: path={{ item }} state=absent
-      with_items:
-        - /etc/openshift-sdn
-        - /root/.kube
-        - /etc/origin
-        - /etc/atomic-enterprise
-        - /etc/openshift
-        - /var/lib/origin
-        - /var/lib/openshift
-        - /var/lib/atomic-enterprise
-        - /etc/sysconfig/origin-master
-        - /etc/sysconfig/origin-node
-        - /etc/sysconfig/atomic-openshift-master
-        - /etc/sysconfig/atomic-openshift-node
-        - /etc/sysconfig/openshift-master
-        - /etc/sysconfig/openshift-node
-        - /etc/sysconfig/atomic-enterprise-master
-        - /etc/sysconfig/atomic-enterprise-node
-        - /etc/etcd
-        - /var/lib/etcd
-
     - user: name={{ item }} state=absent remove=yes
       with_items:
         - alice
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+#  To run:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+#  Example:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+#  Notes:
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_host }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{ item }} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg: "loopback not detected! Please investigate manually."
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: extend the vg
+    command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+    register: extend_output
+
+  - debug: var=extend_output
+
+  - name: start docker
+    service:
+      name: docker
+      state: restarted
+
+  - name: docker info
+    command: docker info
+    register: dockerinfo
+
+  - debug: var=dockerinfo
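Note: after the conversion, docker info should report a devicemapper pool backed by the new volume group instead of a loopback data file. A minimal verification sketch, assuming the docker_vg name written by the config above (the task wording is illustrative, not part of the commit):

  - name: verify direct-lvm is in use
    shell: docker info | grep 'Pool Name'
    register: pool_check
    changed_when: False

  - fail:
      msg: "docker is still running on a loopback data file"
    when: "'docker_vg' not in pool_check.stdout"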
Please investigate" +    when: docker_dev_check.rc != 0 + +  - name: stop docker +    service: +      name: docker +      state: stopped + +  - name: delete /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: remove /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: copy the docker-storage-setup config file +    copy: +      content: > +        DEVS={{ cli_docker_device }} +        VG=docker_vg +      dest: /etc/sysconfig/docker-storage-setup +      owner: root +      group: root +      mode: 0664 + +  - name: docker storage setup +    command: docker-storage-setup +    register: setup_output + +  - debug: var=setup_output + +  - name: extend the vg +    command: lvextend -l 90%VG /dev/docker_vg/docker-pool +    register: extend_output + +  - debug: var=extend_output + +  - name: start docker +    service: +      name: docker +      state: restarted + +  - name: docker info +    command: docker info +    register: dockerinfo + +  - debug: var=dockerinfo diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index ef9b45abd..63d473146 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -172,7 +172,7 @@    - name: pvmove onto new volume      command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1" -    async: 3600 +    async: 43200      poll: 10    - name: Remove the old docker drive from the volume group diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 index 026b24456..acfa89515 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.j2 +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -7,8 +7,8 @@ storage:    cache:      layerinfo: inmemory    s3: -    accesskey: {{ accesskey }} -    secretkey: {{ secretkey }} +    accesskey: {{ aws_access_key }} +    secretkey: {{ aws_secret_key }}      region: us-east-1      bucket: {{ clusterid }}-docker      encrypt: true diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 30b873db3..4dcef1a42 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -1,20 +1,38 @@  ---  # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.  # Usage: -#  ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster" +#  ansible-playbook s3_registry.yml -e clusterid="mycluster"  #  # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.  # The 'clusterid' is the short name of your cluster. 
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index 026b24456..acfa89515 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -7,8 +7,8 @@ storage:
   cache:
     layerinfo: inmemory
   s3:
-    accesskey: {{ accesskey }}
-    secretkey: {{ secretkey }}
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
     region: us-east-1
     bucket: {{ clusterid }}-docker
     encrypt: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 30b873db3..4dcef1a42 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,20 +1,38 @@
 ---
 # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-#  ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
+#  ansible-playbook s3_registry.yml -e clusterid="mycluster"
 #
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.

-- hosts: security_group_{{ clusterid }}_master
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
   remote_user: root
   gather_facts: False

+  vars:
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
   tasks:

+  - name: Check for AWS creds
+    fail:
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }}
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml

   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
@@ -43,6 +61,9 @@
     command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
     when: "'dockersecrets' not in dc.stdout"

+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
   - name: Scale up registry
     command: oc scale --replicas=1 dc/docker-registry
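Note: the credentials now come from the environment of the control machine, e.g. export S3_ACCESS_KEY_ID=... and S3_SECRET_ACCESS_KEY=... before running ansible-playbook s3_registry.yml -e clusterid="mycluster". The "Check for AWS creds" task works because lookup('env', ...) returns an empty string for an unset variable; a minimal sketch of that behavior:

  - debug:
      msg: "{{ lookup('env', 'S3_ACCESS_KEY_ID') == '' }}"  # true when the variable is not exported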
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..7d1544be8
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,145 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible.  This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - name: Detecting Operating System
+      shell: ls /run/ostree-booted
+      ignore_errors: yes
+      failed_when: false
+      register: ostree_output
+
+    - set_fact:
+        is_atomic: "{{ ostree_output.rc == 0 }}"
+
+    - service: name={{ item }} state=stopped
+      with_items:
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-openshift-master
+        - atomic-openshift-master-api
+        - atomic-openshift-master-controllers
+        - atomic-openshift-node
+        - etcd
+        - openshift-master
+        - openshift-master-api
+        - openshift-master-controllers
+        - openshift-node
+        - openvswitch
+        - origin-master
+        - origin-master-api
+        - origin-master-controllers
+        - origin-node
+
+    - yum: name={{ item }} state=absent
+      when: not is_atomic | bool
+      with_items:
+        - atomic-enterprise
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-enterprise-sdn-ovs
+        - atomic-openshift
+        - atomic-openshift-clients
+        - atomic-openshift-master
+        - atomic-openshift-node
+        - atomic-openshift-sdn-ovs
+        - etcd
+        - openshift
+        - openshift-master
+        - openshift-node
+        - openshift-sdn
+        - openshift-sdn-ovs
+        - openvswitch
+        - origin
+        - origin-master
+        - origin-node
+        - origin-sdn-ovs
+        - tuned-profiles-atomic-enterprise-node
+        - tuned-profiles-atomic-openshift-node
+        - tuned-profiles-openshift-node
+        - tuned-profiles-origin-node
+
+    - shell: systemctl reset-failed
+      changed_when: False
+
+    - shell: systemctl daemon-reload
+      changed_when: False
+
+    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+      changed_when: False
+      failed_when: False
+      with_items:
+        - openshift-enterprise
+        - atomic-enterprise
+        - origin
+
+    - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+      changed_when: False
+      failed_when: False
+      register: exited_containers_to_delete
+      with_items:
+        - aep3/aep
+        - openshift3/ose
+        - openshift/origin
+
+    - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ exited_containers_to_delete.results }}"
+
+    - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+      changed_when: False
+      failed_when: False
+      register: images_to_delete
+      with_items:
+        - registry.access.redhat.com/openshift3
+        - registry.access.redhat.com/aep3
+        - docker.io/openshift
+
+    - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ images_to_delete.results }}"
+
+    - file: path={{ item }} state=absent
+      with_items:
+        - /etc/ansible/facts.d/openshift.fact
+        - /etc/atomic-enterprise
+        - /etc/etcd
+        - /etc/openshift
+        - /etc/openshift-sdn
+        - /etc/origin
+        - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-node
+        - /etc/sysconfig/atomic-openshift-master
+        - /etc/sysconfig/atomic-openshift-node
+        - /etc/sysconfig/openshift-master
+        - /etc/sysconfig/openshift-node
+        - /etc/sysconfig/origin-master
+        - /etc/sysconfig/origin-node
+        - /root/.kube
+        - "~{{ ansible_ssh_user }}/.kube"
+        - /usr/share/openshift/examples
+        - /var/lib/atomic-enterprise
+        - /var/lib/etcd
+        - /var/lib/openshift
+        - /var/lib/origin
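Note: /run/ostree-booted exists only on ostree-based hosts (Atomic Host), where RPMs are part of the immutable tree and cannot be removed with yum; that is what the "when: not is_atomic | bool" guard protects. An equivalent standalone sketch of the detection, using the stat module instead of shelling out to ls:

  - stat:
      path: /run/ostree-booted
    register: ostree

  - set_fact:
      is_atomic: "{{ ostree.stat.exists }}"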
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index e666f0472..ae1d0127c 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,14 @@
 ---
+- name: Upgrade base package on masters
+  hosts: masters
+  roles:
+  - openshift_facts
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade base package
+      yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
+
 - name: Re-Run cluster configuration to apply latest configuration changes
   include: ../../common/openshift-cluster/config.yml
   vars:
@@ -40,7 +50,7 @@
   hosts: oo_first_master
   tasks:
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2)
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )

 - name: Update cluster policy
   hosts: oo_first_master
@@ -50,6 +60,19 @@
         {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
         policy reconcile-cluster-roles --confirm

+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-role-bindings --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-role-bindings
+        --exclude-groups=system:authenticated
+        --exclude-groups=system:unauthenticated
+        --exclude-users=system:anonymous
+        --additive-only=true --confirm
+      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
 - name: Upgrade default router
   hosts: oo_first_master
   vars:
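Note: the old comparisons were plain string comparisons, so "1.0.10" sorts before "1.0.6"; the version_compare filter compares versions component-wise. A self-contained sketch with hypothetical values:

  - debug:
      msg: "upgrade can proceed"
    when: "'1.0.10' | version_compare('1.0.6', '>')"  # true, even though '1.0.10' < '1.0.6' as strings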
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..786918929 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -55,9 +55,4 @@
     when: master_names is defined and master_names.0 is defined

 - include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
-
 - include: list.yml
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
-  hosts: "{{ g_svc_master }}"
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 64cf7a65b..1dec923fc 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -2,6 +2,21 @@
 - name: Set master facts and determine if external etcd certs need to be generated
   hosts: oo_masters_to_config
   pre_tasks:
+  - name: Check for RPM generated config marker file .config_managed
+    stat:
+      path: /etc/origin/.config_managed
+    register: rpmgenerated_config
+
+  - name: Remove RPM generated config files if present
+    file:
+      path: "/etc/origin/{{ item }}"
+      state: absent
+    when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+    with_items:
+    - master
+    - node
+    - .config_managed
+
   - set_fact:
       openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
       openshift_master_etcd_hosts: "{{ hostvars
@@ -122,6 +137,7 @@
       openshift_master_certs_no_etcd:
       - admin.crt
       - master.kubelet-client.crt
+      - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
       - master.server.crt
       - openshift-master.crt
       - openshift-registry.crt
@@ -129,6 +145,7 @@
       - etcd.server.crt
       openshift_master_certs_etcd:
       - master.etcd-client.crt
+
   - set_fact:
       openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
@@ -138,9 +155,9 @@
     with_items: openshift_master_certs
     register: g_master_cert_stat_result
   - set_fact:
-      master_certs_missing: "{{ g_master_cert_stat_result.results
+      master_certs_missing: "{{ False in (g_master_cert_stat_result.results
                                 | map(attribute='stat.exists')
-                                | list | intersect([false])}}"
+                                | list ) }}"
       master_cert_subdir: master-{{ openshift.common.hostname }}
       master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -172,6 +189,7 @@
     args:
       creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
     with_items: masters_needing_certs
+
   - name: Retrieve the master cert tarball from the master
     fetch:
       src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
@@ -216,11 +234,19 @@
   roles:
   - role: openshift_master_cluster
     when: openshift_master_ha | bool
-  - role: openshift_examples
-    when: deployment_type in ['enterprise','openshift-enterprise','origin']
+  - openshift_examples
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool

+- name: Enable cockpit
+  hosts: oo_first_master
+  vars:
+    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+  roles:
+  - role: cockpit
+    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+      (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+
 # Additional instance config for online deployments
 - name: Additional instance config
   hosts: oo_masters_deployment_type_online
@@ -245,3 +271,10 @@
   roles:
   - openshift_serviceaccounts
+
+- name: Create services
+  hosts: oo_first_master
+  roles:
+  - role: openshift_router
+    when: openshift.master.infra_nodes is defined
+  #- role: openshift_registry
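Note: the old master_certs_missing expression ended in "| list | intersect([false])", which evaluates to a list ([] or [false]) rather than a boolean, and a list-valued fact is easy to misread in later when: checks once it has been templated. The new "False in (...)" form yields a plain boolean that is true as soon as any cert is missing. The pattern in isolation, assuming a hypothetical stat_out registered from a stat loop:

  - set_fact:
      any_missing: "{{ False in (stat_out.results | map(attribute='stat.exists') | list) }}"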
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index fd5dfcc72..6ca4f7395 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -10,6 +10,8 @@
   - set_fact:
       g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
       g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+      use_sdn: "{{ do_we_use_openshift_sdn }}"
+      sdn_plugin: "{{ sdn_network_plugin }}"

 - include: ../../common/openshift-cluster/config.yml
   vars:
@@ -18,7 +20,10 @@
     g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
+    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}"
+    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
new file mode 100644
index 000000000..0dfa3e9d7
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ node_ip }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ansible_default_ipv4.address }}"
+    openshift_use_openshift_sdn: true
+    openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }}"
+    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 7a3b80da0..c22b897d5 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -34,27 +34,28 @@
       count: "{{ num_infra }}"
   - include: tasks/launch_instances.yml
     vars:
-      instances: "{{ infra_names }}"
+      instances: "{{ node_names }}"
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"

-  - set_fact:
-      a_infra: "{{ infra_names[0] }}"
-  - add_host: name={{ a_infra }} groups=service_master
+  - add_host:
+      name: "{{ master_names.0 }}"
+      groups: service_master
+    when: master_names is defined and master_names.0 is defined

 - include: update.yml
-
-- name: Deploy OpenShift Services
-  hosts: service_master
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
+#
+#- name: Deploy OpenShift Services
+#  hosts: service_master
+#  connection: ssh
+#  gather_facts: yes
+#  roles:
+#  - openshift_registry
+#  - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+#  vars:
+#     g_svc_master: "{{ service_master }}"

 - include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..53b2b9a5e 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -14,11 +14,11 @@
       groups: oo_list_hosts
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))

 - name: List instance(s)
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
   - debug:
-      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+      msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
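Note: default's second argument makes the fallback apply to empty or otherwise falsey values too, not just undefined ones, so groups.status_terminated | default([], true) is safe both when the group is absent and when it exists but is empty. The behavior in isolation:

  - debug:
      msg: "{{ maybe_empty | default([], true) | length }}"  # 0 - the empty string is falsey, so the fallback list is used
    vars:
      maybe_empty: ''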
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 6307ecc27..c428cb465 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,14 +10,33 @@
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
+    zone: "{{ lookup('env', 'zone') }}"
+    network: "{{ lookup('env', 'network') }}"
+# unsupported in 1.9.+
+    #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') | default(cluster, true) }}
       - env-{{ cluster }}
       - host-type-{{ type }}
-      - sub-host-type-{{ sub_host_type }}
+      - sub-host-type-{{ g_sub_host_type }}
       - env-host-type-{{ cluster }}-openshift-{{ type }}
+  when: instances | length > 0
   register: gce

+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ g_sub_host_type }}"
+  when: instances | length > 0 and type == "node"
+
+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ type }}"
+  when: instances | length > 0 and type != "node"
+
 - name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
@@ -27,16 +46,17 @@
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
-  with_items: gce.instance_data
+    openshift_node_labels: "{{ node_label }}"
+  with_items: gce.instance_data | default([], true)

 - name: Wait for ssh
   wait_for: port=22 host={{ item.public_ip }}
-  with_items: gce.instance_data
+  with_items: gce.instance_data | default([], true)

 - name: Wait for user setup
   command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   until: result.rc == 0
-  retries: 20
-  delay: 10
-  with_items: gce.instance_data
+  retries: 30
+  delay: 5
+  with_items: gce.instance_data | default([], true)
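Note: GCE zone names have the form <region>-<zone letter>, for example us-central1-a, so stripping everything after the last hyphen recovers the region. The filter in isolation (the playbook above needs doubled backslashes because the expression passes through an extra layer of YAML/Jinja string escaping):

  - debug:
      msg: "{{ 'us-central1-a' | regex_replace('^(.*)-.*$', '\\1') }}"  # -> us-central1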
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 098b0df73..e20e0a8bc 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,25 +1,18 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
+  connection: local
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+      groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
-  - add_host:
-      name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_masters_to_terminate
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))

 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']

-- include: ../openshift-node/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+
+    - name: Terminate instances that were previously launched
+      local_action:
+        module: gce
+        state: 'absent'
+        name: "{{ item }}"
+        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+        project_id: "{{ lookup('env', 'gce_project_id') }}"
+        zone: "{{ lookup('env', 'zone') }}"
+      with_items: groups['oo_hosts_to_terminate'] | default([], true)
+      when: item is defined

-- include: ../openshift-master/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..6de007807 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
 ---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
 deployment_vars:
   origin:
-    image: centos-7
-    ssh_user:
+    image: preinstalled-slave-50g-v5
+    ssh_user: root
     sudo: yes
   online:
     image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
     image: rhel-7
     ssh_user:
     sudo: yes
-
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 2a0c90b46..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -64,7 +64,7 @@
   register: nb_allocated_ips
   until: nb_allocated_ips.stdout == '{{ instances | length }}'
   retries: 60
-  delay: 1
+  delay: 3
   when: instances | length != 0

 - name: Collect IP addresses of the VMs
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}

-bootcmd:
+runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart