Diffstat (limited to 'playbooks')
-rw-r--r--   playbooks/adhoc/atomic_openshift_tutorial_reset.yml                          |  77
-rwxr-xr-x   playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 104
-rw-r--r--   playbooks/adhoc/s3_registry/s3_registry.yml                                  |  16
-rw-r--r--   playbooks/adhoc/uninstall.yml                                                | 134
-rw-r--r--   playbooks/adhoc/upgrades/upgrade.yml                                         |  15
-rw-r--r--   playbooks/aws/openshift-cluster/launch.yml                                   |   5
-rw-r--r--   playbooks/common/openshift-cluster/create_services.yml                       |   8
-rw-r--r--   playbooks/common/openshift-master/config.yml                                 |  34
-rw-r--r--   playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml               |   2
-rw-r--r--   playbooks/libvirt/openshift-cluster/templates/user-data                      |   2
10 files changed, 302 insertions(+), 95 deletions(-)
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
 # This deletes *ALL* Docker images, and uninstalls OpenShift and
 # Atomic Enterprise RPMs.  It is primarily intended for use
 # with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
 
 - hosts:
     - OSEv3:children
@@ -8,59 +11,6 @@
   sudo: yes
 
   tasks:
-    - service: name={{ item }} state=stopped
-      with_items:
-        - openvswitch
-        - origin-master
-        - origin-node
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - openshift-master
-        - openshift-node
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - etcd
-
-    - yum: name={{ item }} state=absent
-      with_items:
-        - openvswitch
-        - etcd
-        - origin
-        - origin-master
-        - origin-node
-        - origin-sdn-ovs
-        - tuned-profiles-origin-node
-        - atomic-openshift
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - atomic-openshift-sdn-ovs
-        - tuned-profiles-atomic-openshift-node
-        - atomic-enterprise
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - atomic-enterprise-sdn-ovs
-        - tuned-profiles-atomic-enterprise-node
-        - openshift
-        - openshift-master
-        - openshift-node
-        - openshift-sdn-ovs
-        - tuned-profiles-openshift-node
-
-    - shell: systemctl reset-failed
-      changed_when: False
-
-    - shell: systemctl daemon-reload
-      changed_when: False
-
-    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
     - shell: docker ps -a -q | xargs docker stop
       changed_when: False
       failed_when: False
@@ -73,27 +23,6 @@
       changed_when: False
       failed_when: False
 
-    - file: path={{ item }} state=absent
-      with_items:
-        - /etc/openshift-sdn
-        - /root/.kube
-        - /etc/origin
-        - /etc/atomic-enterprise
-        - /etc/openshift
-        - /var/lib/origin
-        - /var/lib/openshift
-        - /var/lib/atomic-enterprise
-        - /etc/sysconfig/origin-master
-        - /etc/sysconfig/origin-node
-        - /etc/sysconfig/atomic-openshift-master
-        - /etc/sysconfig/atomic-openshift-node
-        - /etc/sysconfig/openshift-master
-        - /etc/sysconfig/openshift-node
-        - /etc/sysconfig/atomic-enterprise-master
-        - /etc/sysconfig/atomic-enterprise-node
-        - /etc/etcd
-        - /var/lib/etcd
-
     - user: name={{ item }} state=absent remove=yes
       with_items:
        - alice
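Worth noting on the reset playbook above: everything it previously deleted by hand now lives in the included uninstall.yml (further down in this diff), and only the tutorial-specific cleanup of containers, images, and demo users stays behind. The surviving tasks lean on a common Ansible idiom for best-effort cleanup: let a shell step fail or report no change without failing the play. A minimal sketch of that idiom, reusing the playbook's own docker command:

    - hosts: all
      sudo: yes
      tasks:
        # xargs exits non-zero when `docker ps -a -q` prints nothing, so an
        # already-clean host would otherwise abort the play.
        - shell: docker ps -a -q | xargs docker stop
          changed_when: False   # never report "changed" for a probe/cleanup step
          failed_when: False    # never fail, even with no containers to stop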
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..614b2537a
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,104 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+#  To run:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+#  Example:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+#  Notes:
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_host }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg: loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart
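ops-docker-loopback-to-direct-lvm.yml stops docker, wipes /var/lib/docker, writes DEVS/VG into /etc/sysconfig/docker-storage-setup, and runs docker-storage-setup before restarting docker. A cheap post-check is to invert the playbook's own loopback probe once docker is back up; a sketch, assuming `docker info` on RHEL 7 still prints the 'Data file:' line the playbook itself greps for:

    post_tasks:
    - name: confirm docker is no longer on loopback
      shell: docker info | grep 'Data file:.*loop'
      register: loop_check
      failed_when: loop_check.rc == 0   # a match means loopback is still in use
      changed_when: False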
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index d1546b6fa..4dcef1a42 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -6,13 +6,14 @@
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
 
-- hosts: security_group_{{ clusterid }}_master
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
   remote_user: root
   gather_facts: False
 
   vars:
-    aws_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
-    aws_secret_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
   tasks:
 
   - name: Check for AWS creds
@@ -23,10 +24,16 @@
     - aws_access_key
     - aws_secret_key
 
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
   - name: Create S3 bucket
     local_action:
       module: s3 bucket="{{ clusterid }}-docker" mode=create
 
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
 
@@ -54,6 +61,9 @@
     command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
     when: "'dockersecrets' not in dc.stdout"
 
+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
   - name: Scale up registry
     command: oc scale --replicas=1 dc/docker-registry
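Three fixes land in s3_registry.yml: the host pattern moves to the env-host-type tag, the swapped credential lookups are corrected (the old vars read AWS_SECRET_ACCESS_KEY into aws_access_key and vice versa) and renamed to S3_-prefixed variables, and the reconfiguration is bracketed by a scale-down/scale-up of the registry. Consolidated, the bracket looks like this sketch; the 30-second pause is a heuristic wait for the deployment config change to settle, not a guaranteed synchronization:

    - name: Scale down registry
      command: oc scale --replicas=0 dc/docker-registry

    # ...bucket, env var, config template, secrets and volume tasks go here...

    - name: Wait for deployment config to take effect before scaling up
      pause: seconds=30

    - name: Scale up registry
      command: oc scale --replicas=1 dc/docker-registry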
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..40db668da
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,134 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible.  This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - service: name={{ item }} state=stopped
+      with_items:
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-openshift-master
+        - atomic-openshift-master-api
+        - atomic-openshift-master-controllers
+        - atomic-openshift-node
+        - etcd
+        - openshift-master
+        - openshift-master-api
+        - openshift-master-controllers
+        - openshift-node
+        - openvswitch
+        - origin-master
+        - origin-master-api
+        - origin-master-controllers
+        - origin-node
+
+    - yum: name={{ item }} state=absent
+      with_items:
+        - atomic-enterprise
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-enterprise-sdn-ovs
+        - atomic-openshift
+        - atomic-openshift-clients
+        - atomic-openshift-master
+        - atomic-openshift-node
+        - atomic-openshift-sdn-ovs
+        - etcd
+        - openshift
+        - openshift-master
+        - openshift-node
+        - openshift-sdn
+        - openshift-sdn-ovs
+        - openvswitch
+        - origin
+        - origin-master
+        - origin-node
+        - origin-sdn-ovs
+        - tuned-profiles-atomic-enterprise-node
+        - tuned-profiles-atomic-openshift-node
+        - tuned-profiles-openshift-node
+        - tuned-profiles-origin-node
+
+    - shell: systemctl reset-failed
+      changed_when: False
+
+    - shell: systemctl daemon-reload
+      changed_when: False
+
+    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+      changed_when: False
+      failed_when: False
+      with_items:
+        - openshift-enterprise
+        - atomic-enterprise
+        - origin
+
+    - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+      changed_when: False
+      failed_when: False
+      register: exited_containers_to_delete
+      with_items:
+        - aep3/aep
+        - openshift3/ose
+        - openshift/origin
+
+    - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ exited_containers_to_delete.results }}"
+
+    - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+      changed_when: False
+      failed_when: False
+      register: images_to_delete
+      with_items:
+        - registry.access.redhat.com/openshift3
+        - registry.access.redhat.com/aep3
+        - docker.io/openshift
+
+    - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ images_to_delete.results }}"
+
+    - file: path={{ item }} state=absent
+      with_items:
+        - /etc/ansible/facts.d/openshift.fact
+        - /etc/atomic-enterprise
+        - /etc/etcd
+        - /etc/openshift
+        - /etc/openshift-sdn
+        - /etc/origin
+        - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-node
+        - /etc/sysconfig/atomic-openshift-master
+        - /etc/sysconfig/atomic-openshift-node
+        - /etc/sysconfig/openshift-master
+        - /etc/sysconfig/openshift-node
+        - /etc/sysconfig/origin-master
+        - /etc/sysconfig/origin-node
+        - /root/.kube
+        - /usr/share/openshift/examples
+        - /var/lib/atomic-enterprise
+        - /var/lib/etcd
+        - /var/lib/openshift
+        - /var/lib/origin
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index e666f0472..56a1df860 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -40,7 +40,7 @@
   hosts: oo_first_master
   tasks:
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2)
+    when: _new_version.stdout | version_compare('1.0.6','<') or (_new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<'))
 
 - name: Update cluster policy
   hosts: oo_first_master
@@ -50,6 +50,19 @@
         {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
         policy reconcile-cluster-roles --confirm
 
+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-role-bindings --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-role-bindings
+        --exclude-groups=system:authenticated
+        --exclude-groups=system:unauthenticated
+        --exclude-users=system:anonymous
+        --additive-only=true --confirm
+      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
 - name: Upgrade default router
   hosts: oo_first_master
   vars:
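The two `when:` rewrites above are the substance of the upgrade.yml change: the old guard compared a version string against bare numbers, which is at best a lexical comparison (e.g. '10.1.0' sorts before '3.0') and at worst a template error, while Jinja2's version_compare filter evaluates them as versions. The corrected guard, reformatted onto multiple lines for readability but equivalent to the one-liner in the diff:

    - fail:
        msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
      when: _new_version.stdout | version_compare('1.0.6', '<') or
            (_new_version.stdout | version_compare('3.0', '>=') and
             _new_version.stdout | version_compare('3.0.2', '<'))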
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..786918929 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -55,9 +55,4 @@
     when: master_names is defined and master_names.0 is defined
 
 - include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
-
 - include: list.yml
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
-  hosts: "{{ g_svc_master }}"
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 64cf7a65b..0a3fe90e1 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -2,6 +2,21 @@
 - name: Set master facts and determine if external etcd certs need to be generated
   hosts: oo_masters_to_config
   pre_tasks:
+  - name: Check for RPM generated config marker file .config_managed
+    stat:
+      path: /etc/origin/.config_managed
+    register: rpmgenerated_config
+
+  - name: Remove RPM generated config files if present
+    file:
+      path: "/etc/origin/{{ item }}"
+      state: absent
+    when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+    with_items:
+    - master
+    - node
+    - .config_managed
+
   - set_fact:
       openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
       openshift_master_etcd_hosts: "{{ hostvars
@@ -216,11 +231,19 @@
   roles:
   - role: openshift_master_cluster
     when: openshift_master_ha | bool
-  - role: openshift_examples
-    when: deployment_type in ['enterprise','openshift-enterprise','origin']
+  - openshift_examples
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool
 
+- name: Enable cockpit
+  hosts: oo_first_master
+  vars:
+    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+  roles:
+  - role: cockpit
+    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+      (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+
 # Additional instance config for online deployments
 - name: Additional instance config
   hosts: oo_masters_deployment_type_online
@@ -245,3 +268,10 @@
   roles:
   - openshift_serviceaccounts
+
+- name: Create services
+  hosts: oo_first_master
+  roles:
+  - role: openshift_router
+    when: openshift.master.infra_nodes is defined
+  #- role: openshift_registry
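The config.yml changes add a marker-file guard ahead of master configuration: enterprise RPMs can drop their own config under /etc/origin, and the .config_managed sentinel is what the play keys on before clearing that RPM-generated config out of the way so the ansible-managed config wins (the rationale is my reading; the diff itself only shows the mechanics). The stat-then-conditional-delete pattern in isolation:

    pre_tasks:
    - name: Check for RPM generated config marker file .config_managed
      stat:
        path: /etc/origin/.config_managed   # sentinel left by the RPM install
      register: rpmgenerated_config

    - name: Remove RPM generated config files if present
      file:
        path: "/etc/origin/{{ item }}"
        state: absent
      when: rpmgenerated_config.stat.exists and
            deployment_type in ['openshift-enterprise', 'atomic-enterprise']
      with_items:
      - master
      - node
      - .config_managed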
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 2a0c90b46..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -64,7 +64,7 @@
   register: nb_allocated_ips
   until: nb_allocated_ips.stdout == '{{ instances | length }}'
   retries: 60
-  delay: 1
+  delay: 3
   when: instances | length != 0
 
 - name: Collect IP addresses of the VMs
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
 
-bootcmd:
+runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
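The last two hunks are libvirt polish: the IP-allocation poll now waits 3 seconds between each of its 60 retries instead of 1, and the DHCP_HOSTNAME setup in the cloud-init template moves from bootcmd to runcmd. The distinction matters: cloud-init runs bootcmd early on every boot, before networking is configured, while runcmd runs once at the end of first boot, when `service network restart` can actually take effect. A minimal user-data sketch of the corrected placement (node1.example.com stands in for the templated {{ item[0] }}):

    #cloud-config
    runcmd:   # first boot only, after the network is up; bootcmd would run too early, on every boot
      - echo 'DHCP_HOSTNAME="node1.example.com"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
      - service network restart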
