| field | value | date |
|---|---|---|
| author | Diego Castro <spinolacastro@gmail.com> | 2015-10-22 16:22:43 -0300 |
| committer | Diego Castro <spinolacastro@gmail.com> | 2015-10-22 16:22:43 -0300 |
| commit | f559eb3146c65a6ec58f0fed3eb2d1124c1314c7 (patch) | |
| tree | 24975a7a87568784b88252bc70d14f7b302670b0 /playbooks/adhoc | |
| parent | 8468d25fae71c80277c10ad975641cb1ba230fd8 (diff) | |
| parent | e6d426fddd79c08452195cd32286bb600f62d51d (diff) | |
fix merge conflicts
Diffstat (limited to 'playbooks/adhoc')
29 files changed, 1041 insertions, 331 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml new file mode 100644 index 000000000..c14d08e87 --- /dev/null +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -0,0 +1,29 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs.  It is primarily intended for use +# with the tutorial as well as for developers to reset state. +# +--- +- include: uninstall.yml + +- hosts: +    - OSEv3:children + +  sudo: yes + +  tasks: +    - shell: docker ps -a -q | xargs docker stop +      changed_when: False +      failed_when: False + +    - shell: docker ps -a -q| xargs docker rm +      changed_when: False +      failed_when: False + +    - shell:  docker images -q |xargs docker rmi +      changed_when: False +      failed_when: False + +    - user: name={{ item }} state=absent remove=yes +      with_items: +        - alice +        - joe diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 684a0ca72..4f0ef7a75 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -50,6 +50,16 @@    - debug: var=vol +  - name: tag the vol with a name +    ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}} +    args: +      tags: +        Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}" +        env: "{{cli_environment}}" +    register: voltags + +  - debug: var=voltags +  - name: Configure the drive    gather_facts: no    hosts: oo_master @@ -118,6 +128,13 @@        state: unmounted        fstype: ext4 +  - name: remove from fstab +    mount: +      name: "{{ pv_mntdir }}" +      src: "{{ cli_device_name }}" +      state: absent +      fstype: ext4 +    - name: detach drive      delegate_to: localhost      ec2_vol: diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup new file mode 100644 index 000000000..059058823 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup @@ -0,0 +1,2 @@ +DEVS=/dev/xvdb +VG=docker_vg diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml new file mode 100644 index 000000000..b6a2d2f26 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -0,0 +1,142 @@ +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker) +#  in AWS.  This adds an additional EBS volume and creates the Volume Group on this EBS volume to use. +# +#  To run: +#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +#    export AWS_ACCESS_KEY_ID='XXXXX' +#    export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +#   ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +#  Example: +#   ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +#  Notes: +#  * By default this will do a 30GB volume. +#  * iops are calculated by Disk Size * 30.  e.g ( 30GB * 30) = 900 iops +#  * This will remove /var/lib/docker! 
+#  * You may need to re-deploy docker images after this is run (like monitoring) +# + +- name: Fix docker to have a provisioned iops drive +  hosts: "tag_Name_{{ cli_tag_name }}" +  user: root +  connection: ssh +  gather_facts: no + +  vars: +    cli_volume_type: gp2 +    cli_volume_size: 30 + +  pre_tasks: +  - fail: +      msg: "This playbook requires {{item}} to be set." +    when: "{{ item }} is not defined or {{ item }} == ''" +    with_items: +    - cli_tag_name +    - cli_volume_size + +  - debug: +      var: hosts + +  - name: start docker +    service: +      name: docker +      state: started + +  - name: Determine if loopback +    shell: docker info | grep 'Data file:.*loop' +    register: loop_device_check +    ignore_errors: yes + +  - debug: +      var: loop_device_check + +  - name: fail if we don't detect loopback +    fail: +      msg:  loopback not detected! Please investigate manually. +    when: loop_device_check.rc == 1 + +  - name: stop zagg client monitoring container +    service: +      name: oso-rhel7-zagg-client +      state: stopped +    ignore_errors: yes + +  - name: stop pcp client monitoring container +    service: +      name: oso-f22-host-monitoring +      state: stopped +    ignore_errors: yes + +  - name: stop docker +    service: +      name: docker +      state: stopped + +  - name: delete /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: remove /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: check to see if /dev/xvdb exists +    command: test -e /dev/xvdb +    register: xvdb_check +    ignore_errors: yes + +  - debug: var=xvdb_check + +  - name: fail if /dev/xvdb already exists +    fail: +      msg: /dev/xvdb already exists.  Please investigate +    when: xvdb_check.rc == 0 + +  - name: Create a volume and attach it +    delegate_to: localhost +    ec2_vol: +      state: present +      instance: "{{ ec2_id }}" +      region: "{{ ec2_region }}" +      volume_size: "{{ cli_volume_size | default(30, True)}}" +      volume_type: "{{ cli_volume_type }}" +      device_name: /dev/xvdb +    register: vol + +  - debug: var=vol + +  - name: tag the vol with a name +    delegate_to: localhost +    ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }} +    args: +      tags: +        Name: "{{ ec2_tag_Name }}" +        env: "{{ ec2_tag_environment }}" +    register: voltags + +  - name: Wait for volume to attach +    pause: +      seconds: 30 + +  - name: copy the docker-storage-setup config file +    copy: +      src: docker-storage-setup +      dest: /etc/sysconfig/docker-storage-setup +      owner: root +      group: root +      mode: 0664 + +  - name: docker storage setup +    command: docker-storage-setup +    register: setup_output + +  - debug: var=setup_output + +  - name: start docker +    command: systemctl start docker.service +    register: dockerstart + +  - debug: var=dockerstart + diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml new file mode 100755 index 000000000..614b2537a --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -0,0 +1,104 @@ +#!/usr/bin/ansible-playbook +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker). +# +# It requires the block device to be already provisioned and attached to the host. This is a generic playbook, +# meant to be used for manual conversion. 
For AWS specific conversions, use the other playbook in this directory. +# +#  To run: +#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device> +# +#  Example: +#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb +# +#  Notes: +#  * This will remove /var/lib/docker! +#  * You may need to re-deploy docker images after this is run (like monitoring) + +- name: Fix docker to have a provisioned iops drive +  hosts: "{{ cli_name }}" +  user: root +  connection: ssh +  gather_facts: no + +  pre_tasks: +  - fail: +      msg: "This playbook requires {{item}} to be set." +    when: "{{ item }} is not defined or {{ item }} == ''" +    with_items: +    - cli_docker_device + +  - name: start docker +    service: +      name: docker +      state: started + +  - name: Determine if loopback +    shell: docker info | grep 'Data file:.*loop' +    register: loop_device_check +    ignore_errors: yes + +  - debug: +      var: loop_device_check + +  - name: fail if we don't detect loopback +    fail: +      msg:  loopback not detected! Please investigate manually. +    when: loop_device_check.rc == 1 + +  - name: stop zagg client monitoring container +    service: +      name: oso-rhel7-zagg-client +      state: stopped +    ignore_errors: yes + +  - name: stop pcp client monitoring container +    service: +      name: oso-f22-host-monitoring +      state: stopped +    ignore_errors: yes + +  - name: "check to see if {{ cli_docker_device }} exists" +    command: "test -e {{ cli_docker_device }}" +    register: docker_dev_check +    ignore_errors: yes + +  - debug: var=docker_dev_check + +  - name: "fail if {{ cli_docker_device }} doesn't exist" +    fail: +      msg: "{{ cli_docker_device }} doesn't exist. Please investigate" +    when: docker_dev_check.rc != 0 + +  - name: stop docker +    service: +      name: docker +      state: stopped + +  - name: delete /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: remove /var/lib/docker +    command: rm -rf /var/lib/docker + +  - name: copy the docker-storage-setup config file +    copy: +      content: > +        DEVS={{ cli_docker_device }} +        VG=docker_vg +      dest: /etc/sysconfig/docker-storage-setup +      owner: root +      group: root +      mode: 0664 + +  - name: docker storage setup +    command: docker-storage-setup +    register: setup_output + +  - debug: var=setup_output + +  - name: start docker +    command: systemctl start docker.service +    register: dockerstart + +  - debug: var=dockerstart diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml new file mode 100644 index 000000000..a19291a9f --- /dev/null +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -0,0 +1,69 @@ +--- +# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues. +# +#  To run: +# +#  1. run the playbook: +# +#   ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml +# +#  Example: +# +#   ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml +# +#  Notes: +#  *  This *should* not interfere with running docker images +# + +- name: Clean up Docker Storage +  gather_facts: no +  hosts: "tag_Name_{{ cli_tag_name }}" +  user: root +  connection: ssh + +  pre_tasks: + +  - fail: +      msg: "This playbook requires {{item}} to be set." 
+    when: "{{ item }} is not defined or {{ item }} == ''" +    with_items: +    - cli_tag_name + +  - name: Ensure docker is running +    service: +      name: docker +      state: started +      enabled: yes + +  - name: Get docker info +    command: docker info +    register: docker_info + +  - name: Show docker info +    debug: +      var: docker_info.stdout_lines + +  - name: Remove exited and dead containers +    shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm" +    ignore_errors: yes + +  - name: Remove dangling docker images +    shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" +    ignore_errors: yes + +  - name: Remove non-running docker images +    shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null" +    ignore_errors: yes + +  # leaving off the '-t' for docker exec.  With it, it doesn't work with ansible and tty support +  - name: update zabbix docker items +    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py + +  # Get and show docker info again. +  - name: Get docker info +    command: docker info +    register: docker_info + +  - name: Show docker info +    debug: +      var: docker_info.stdout_lines diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py new file mode 100644 index 000000000..d0264cde9 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +''' +Custom filters for use in openshift-ansible +''' + +import pdb + + +class FilterModule(object): +    ''' Custom ansible filters ''' + +    @staticmethod +    def oo_pdb(arg): +        ''' This pops you into a pdb instance where arg is the data passed in +            from the filter. +            Ex: "{{ hostvars | oo_pdb }}" +        ''' +        pdb.set_trace() +        return arg + +    @staticmethod +    def translate_volume_name(volumes, target_volume): +        ''' +            This filter matches a device string /dev/sdX to /dev/xvdX +            It will then return the AWS volume ID +        ''' +        for vol in volumes: +            translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd") +            if target_volume.startswith(translated_name): +                return vol["id"] + +        return None + + +    def filters(self): +        ''' returns a mapping of filters to methods ''' +        return { +            "translate_volume_name": self.translate_volume_name, +        } diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml new file mode 100644 index 000000000..63d473146 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -0,0 +1,206 @@ +--- +# This playbook grows the docker VG on a node by: +#  * add a new volume +#  * add volume to the existing VG. +#  * pv move to the new volume. +#  * remove old volume +#  * detach volume +#  * mark old volume in AWS with "REMOVE ME" tag +#  * grow docker LVM to 90% of the VG +# +#  To run: +#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +#    export AWS_ACCESS_KEY_ID='XXXXX' +#    export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. 
run the playbook: +#   ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml +# +#  Example: +#   ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml +# +#  Notes: +#  * By default this will do a 55GB GP2 volume.  The can be overidden with the "-e 'cli_volume_size=100'" variable +#  * This does a GP2 by default.  Support for Provisioned IOPS has not been added +#  * This will assign the new volume to /dev/xvdc. This is not variablized, yet. +#  * This can be done with NO downtime on the host +#  * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool".  This is +#      the LV that gets created via the "docker-storage-setup" command +# + +- name: Grow the docker volume group +  hosts: "tag_Name_{{ cli_tag_name }}" +  user: root +  connection: ssh +  gather_facts: no + +  vars: +    cli_volume_type: gp2 +    cli_volume_size: 55 +#    cli_volume_iops: "{{ 30 * cli_volume_size }}" + +  pre_tasks: +  - fail: +      msg: "This playbook requires {{item}} to be set." +    when: "{{ item }} is not defined or {{ item }} == ''" +    with_items: +    - cli_tag_name +    - cli_volume_size + +  - debug: +      var: hosts + +  - name: start docker +    service: +      name: docker +      state: started + +  - name: Determine if Storage Driver (docker info) is devicemapper +    shell: docker info | grep 'Storage Driver:.*devicemapper' +    register: device_mapper_check +    ignore_errors: yes + +  - debug: +      var: device_mapper_check + +  - name: fail if we don't detect devicemapper +    fail: +      msg:  The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually. +    when: device_mapper_check.rc == 1 + +  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test +  # and find the volume group. +  - name: Attempt to find the Volume Group that docker is using +    shell: lvs | grep docker-pool | awk '{print $2}' +    register: docker_vg_name +    ignore_errors: yes + +  - debug: +      var: docker_vg_name + +  - name: fail if we don't find a docker volume group +    fail: +      msg:  Unable to find docker volume group. Please investigate manually. +    when: docker_vg_name.stdout_lines|length != 1 + +  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test +  # and find the physical volume. +  - name: Attempt to find the Phyisical Volume that docker is using +    shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'" +    register: docker_pv_name +    ignore_errors: yes + +  - debug: +      var: docker_pv_name + +  - name: fail if we don't find a docker physical volume +    fail: +      msg:  Unable to find docker physical volume. Please investigate manually. +    when: docker_pv_name.stdout_lines|length != 1 + + +  - name: get list of volumes from AWS +    delegate_to: localhost +    ec2_vol: +      state: list +      instance: "{{ ec2_id }}" +      region: "{{ ec2_region }}" +    register: attached_volumes + +  - debug: var=attached_volumes + +  - name: get volume id of current docker volume +    set_fact: +      old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}" + +  - debug: var=old_docker_volume_id + +  - name: check to see if /dev/xvdc exists +    command: test -e /dev/xvdc +    register: xvdc_check +    ignore_errors: yes + +  - debug: var=xvdc_check + +  - name: fail if /dev/xvdc already exists +    fail: +      msg: /dev/xvdc already exists.  
Please investigate +    when: xvdc_check.rc == 0 + +  - name: Create a volume and attach it +    delegate_to: localhost +    ec2_vol: +      state: present +      instance: "{{ ec2_id }}" +      region: "{{ ec2_region }}" +      volume_size: "{{ cli_volume_size | default(30, True)}}" +      volume_type: "{{ cli_volume_type }}" +      device_name: /dev/xvdc +    register: create_volume + +  - debug: var=create_volume + +  - name: Fail when problems creating volumes and attaching +    fail: +      msg: "Failed to create or attach volume msg: {{ create_volume.msg }}" +    when: create_volume.msg is defined + +  - name: tag the vol with a name +    delegate_to: localhost +    ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }} +    args: +      tags: +        Name: "{{ ec2_tag_Name }}" +        env: "{{ ec2_tag_environment }}" +    register: voltags + +  - name: check for attached drive +    command: test -b /dev/xvdc +    register: attachment_check +    until: attachment_check.rc == 0 +    retries: 30 +    delay: 2 + +  - name: partition the new drive and make it lvm +    command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm + +  - name: pvcreate /dev/xvdc +    command: pvcreate /dev/xvdc1 + +  - name: Extend the docker volume group +    command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1 + +  - name: pvmove onto new volume +    command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1" +    async: 43200 +    poll: 10 + +  - name: Remove the old docker drive from the volume group +    command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}" + +  - name: Remove the pv from the old drive +    command: "pvremove {{ docker_pv_name.stdout }}" + +  - name: Extend the docker lvm +    command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool" + +  - name: detach  old docker volume +    delegate_to: localhost +    ec2_vol: +      region: "{{ ec2_region }}" +      id: "{{ old_docker_volume_id }}" +      instance: None + +  - name: tag the old vol valid label +    delegate_to: localhost +    ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}} +    args: +      tags: +        Name: "{{ ec2_tag_Name }} REMOVE ME" +    register: voltags + +  - name: Update the /etc/sysconfig/docker-storage-setup with new device +    lineinfile: +      dest: /etc/sysconfig/docker-storage-setup +      regexp: ^DEVS= +      line: DEVS=/dev/xvdc diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 new file mode 100644 index 000000000..acfa89515 --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -0,0 +1,20 @@ +version: 0.1 +log: +  level: debug +http: +  addr: :5000 +storage: +  cache: +    layerinfo: inmemory +  s3: +    accesskey: {{ aws_access_key }} +    secretkey: {{ aws_secret_key }} +    region: us-east-1 +    bucket: {{ clusterid }}-docker +    encrypt: true +    secure: true +    v4auth: true +    rootdirectory: /registry +middleware: +  repository: +    - name: openshift diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml new file mode 100644 index 000000000..4dcef1a42 --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -0,0 +1,71 @@ +--- +# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage. 
+# Usage: +#  ansible-playbook s3_registry.yml -e clusterid="mycluster" +# +# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. +# The 'clusterid' is the short name of your cluster. + +- hosts: tag_env-host-type_{{ clusterid }}-openshift-master +  remote_user: root +  gather_facts: False + +  vars: +    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}" +    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}" + +  tasks: + +  - name: Check for AWS creds +    fail:  +      msg: "Couldn't find {{ item }} creds in ENV" +    when: "{{ item }} == ''" +    with_items: +    - aws_access_key +    - aws_secret_key + +  - name: Scale down registry +    command: oc scale --replicas=0 dc/docker-registry + +  - name: Create S3 bucket +    local_action: +      module: s3 bucket="{{ clusterid }}-docker" mode=create + +  - name: Set up registry environment variable +    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml + +  - name: Generate docker registry config +    template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 + +  - name: Determine if new secrets are needed +    command: oc get secrets +    register: secrets + +  - name: Create registry secrets +    command: oc secrets new dockerregistry /root/config.yml +    when: "'dockerregistry' not in secrets.stdout" + +  - name: Determine if service account contains secrets +    command: oc describe serviceaccount/registry +    register: serviceaccount + +  - name: Add secrets to registry service account +    command: oc secrets add serviceaccount/registry secrets/dockerregistry +    when: "'dockerregistry' not in serviceaccount.stdout" + +  - name: Determine if deployment config contains secrets +    command: oc volume dc/docker-registry --list +    register: dc + +  - name: Add secrets to registry deployment config +    command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry +    when: "'dockersecrets' not in dc.stdout" + +  - name: Wait for deployment config to take effect before scaling up +    pause: seconds=30 + +  - name: Scale up registry +    command: oc scale --replicas=1 dc/docker-registry + +  - name: Delete temporary config file +    file: path=/root/config.yml state=absent diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml new file mode 100644 index 000000000..40db668da --- /dev/null +++ b/playbooks/adhoc/uninstall.yml @@ -0,0 +1,134 @@ +# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift +# Enterprise content installed by ansible.  
This includes: +#  +#    configuration +#    containers +#    example templates and imagestreams +#    images +#    RPMs +--- +- hosts: +    - OSEv3:children + +  sudo: yes + +  tasks: +    - service: name={{ item }} state=stopped +      with_items: +        - atomic-enterprise-master +        - atomic-enterprise-node +        - atomic-openshift-master +        - atomic-openshift-master-api +        - atomic-openshift-master-controllers +        - atomic-openshift-node +        - etcd +        - openshift-master +        - openshift-master-api +        - openshift-master-controllers +        - openshift-node +        - openvswitch +        - origin-master +        - origin-master-api +        - origin-master-controllers +        - origin-node + +    - yum: name={{ item }} state=absent +      with_items: +        - atomic-enterprise +        - atomic-enterprise-master +        - atomic-enterprise-node +        - atomic-enterprise-sdn-ovs +        - atomic-openshift +        - atomic-openshift-clients +        - atomic-openshift-master +        - atomic-openshift-node +        - atomic-openshift-sdn-ovs +        - etcd +        - openshift +        - openshift-master +        - openshift-node +        - openshift-sdn +        - openshift-sdn-ovs +        - openvswitch +        - origin +        - origin-master +        - origin-node +        - origin-sdn-ovs +        - tuned-profiles-atomic-enterprise-node +        - tuned-profiles-atomic-openshift-node +        - tuned-profiles-openshift-node +        - tuned-profiles-origin-node + +    - shell: systemctl reset-failed +      changed_when: False + +    - shell: systemctl daemon-reload +      changed_when: False + +    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node  +      changed_when: False +      failed_when: False +      with_items: +        - openshift-enterprise +        - atomic-enterprise +        - origin + +    - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}' +      changed_when: False +      failed_when: False +      register: exited_containers_to_delete +      with_items: +        - aep3/aep +        - openshift3/ose +        - openshift/origin + +    - shell: "docker rm {{ item.stdout_lines | join(' ') }}" +      changed_when: False +      failed_when: False +      with_items: "{{ exited_containers_to_delete.results }}" + +    - shell: docker images | grep {{ item }} | awk '{ print $3 }' +      changed_when: False +      failed_when: False +      register: images_to_delete +      with_items: +        - registry.access.redhat.com/openshift3 +        - registry.access.redhat.com/aep3 +        - docker.io/openshift + +    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}" +      changed_when: False +      failed_when: False +      with_items: "{{ images_to_delete.results }}" + +    - file: path={{ item }} state=absent +      with_items: +        - /etc/ansible/facts.d/openshift.fact +        - /etc/atomic-enterprise +        - /etc/etcd +        - /etc/openshift +        - /etc/openshift-sdn +        - /etc/origin +        - /etc/sysconfig/atomic-enterprise-master +        
- /etc/sysconfig/atomic-enterprise-node +        - /etc/sysconfig/atomic-openshift-master +        - /etc/sysconfig/atomic-openshift-node +        - /etc/sysconfig/openshift-master +        - /etc/sysconfig/openshift-node +        - /etc/sysconfig/origin-master +        - /etc/sysconfig/origin-node +        - /root/.kube +        - /usr/share/openshift/examples +        - /var/lib/atomic-enterprise +        - /var/lib/etcd +        - /var/lib/openshift +        - /var/lib/origin diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md new file mode 100644 index 000000000..6de8a970f --- /dev/null +++ b/playbooks/adhoc/upgrades/README.md @@ -0,0 +1,21 @@ +# [NOTE] +This playbook will re-run installation steps overwriting any local +modifications. You should ensure that your inventory has been updated with any +modifications you've made after your initial installation. If you find any items +that cannot be configured via ansible please open an issue at +https://github.com/openshift/openshift-ansible + +# Overview +This playbook is available as a technical preview. It currently performs the +following steps. + + * Upgrade and restart master services + * Upgrade and restart node services + * Applies latest configuration by re-running the installation playbook + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +# Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/upgrades/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
new file mode 120000
index 000000000..73cafffe5
--- /dev/null
+++ b/playbooks/adhoc/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/upgrades/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml new file mode 100644 index 000000000..56a1df860 --- /dev/null +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -0,0 +1,128 @@ +--- +- name: Re-Run cluster configuration to apply latest configuration changes +  include: ../../common/openshift-cluster/config.yml +  vars: +    g_etcd_group: "{{ 'etcd' }}" +    g_masters_group: "{{ 'masters' }}" +    g_nodes_group: "{{ 'nodes' }}" +    openshift_cluster_id: "{{ cluster_id | default('default') }}" +    openshift_deployment_type: "{{ deployment_type }}" + +- name: Upgrade masters +  hosts: masters +  vars: +    openshift_version: "{{ openshift_pkg_version | default('') }}" +  tasks: +    - name: Upgrade master packages +      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest +    - name: Restart master services +      service: name="{{ openshift.common.service_type}}-master" state=restarted + +- name: Upgrade nodes +  hosts: nodes +  vars: +    openshift_version: "{{ openshift_pkg_version | default('') }}" +  tasks: +    - name: Upgrade node packages +      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest +    - name: Restart node services +      service: name="{{ openshift.common.service_type }}-node" state=restarted + +- name: Determine new master version +  hosts: oo_first_master +  tasks: +    - name: Determine new version +      command: > +        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master +      register: _new_version + +- name: Ensure AOS 3.0.2 or Origin 1.0.6 +  hosts: oo_first_master +  tasks: +    fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later +    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') ) + +- name: Update cluster policy +  hosts: oo_first_master +  tasks: +    - name: oadm policy reconcile-cluster-roles --confirm +      command: > +        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig +        policy reconcile-cluster-roles --confirm + +- name: Update cluster policy bindings +  hosts: oo_first_master +  tasks: +    - name: oadm policy reconcile-cluster-role-bindings --confirm +      command: > +        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig +        policy reconcile-cluster-role-bindings +        --exclude-groups=system:authenticated +        --exclude-groups=system:unauthenticated +        --exclude-users=system:anonymous +        --additive-only=true --confirm +      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') + +- name: Upgrade default router +  hosts: oo_first_master +  vars: +    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" +    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" +  tasks: +    - name: Check for default router +      command: > +        {{ oc_cmd }} get -n default dc/router +      register: _default_router +      failed_when: false +      changed_when: false +    - name: Check for allowHostNetwork and allowHostPorts +      
when: _default_router.rc == 0 +      shell: > +        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork +      register: _scc +    - name: Grant allowHostNetwork and allowHostPorts +      when: +        - _default_router.rc == 0 +        - "'false' in _scc.stdout" +      command: > +        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9 +    - name: Update deployment config to 1.0.4/3.0.1 spec +      when: _default_router.rc == 0 +      command: > +        {{ oc_cmd }} patch dc/router -p +        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' +    - name: Switch to hostNetwork=true +      when: _default_router.rc == 0 +      command: > +        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' +    - name: Update router image to current version +      when: _default_router.rc == 0 +      command: > +        {{ oc_cmd }} patch dc/router -p +        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + +- name: Upgrade default +  hosts: oo_first_master +  vars: +    - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}" +    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" +  tasks: +    - name: Check for default registry +      command: > +          {{ oc_cmd }} get -n default dc/docker-registry +      register: _default_registry +      failed_when: false +      changed_when: false +    - name: Update registry image to current version +      when: _default_registry.rc == 0 +      command: > +        {{ oc_cmd }} patch dc/docker-registry -p +        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + +- name: Update image streams and templates +  hosts: oo_first_master +  vars: +    openshift_examples_import_command: "update" +    openshift_deployment_type: "{{ deployment_type }}" +  roles: +    - openshift_examples diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index a31cbef65..1e884240a 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -2,50 +2,57 @@  - hosts: localhost    gather_facts: no    vars: -    g_zserver: http://localhost/zabbix/api_jsonrpc.php -    g_zuser: Admin -    g_zpassword: zabbix +    g_server: http://localhost:8080/zabbix/api_jsonrpc.php +    g_user: '' +    g_password: '' +    roles: -  - ../../../roles/os_zabbix -  post_tasks: +  - lib_zabbix -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" +  post_tasks: +  - name: CLEAN List template for heartbeat +    zbx_template: +      zbx_server: "{{ g_server }}" +      zbx_user: "{{ g_user }}" +      zbx_password: "{{ g_password }}"        state: list        name: 'Template Heartbeat'      register: templ_heartbeat -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" +  - name: CLEAN List template app zabbix server +    zbx_template: +      zbx_server: "{{ g_server }}" +      zbx_user: "{{ g_user }}" +      zbx_password: "{{ g_password }}"        state: list        name: 'Template App Zabbix Server' 
     register: templ_zabbix_server -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" +  - name: CLEAN List template app zabbix server +    zbx_template: +      zbx_server: "{{ g_server }}" +      zbx_user: "{{ g_user }}" +      zbx_password: "{{ g_password }}"        state: list        name: 'Template App Zabbix Agent'      register: templ_zabbix_agent -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" +  - name: CLEAN List all templates +    zbx_template: +      zbx_server: "{{ g_server }}" +      zbx_user: "{{ g_user }}" +      zbx_password: "{{ g_password }}"        state: list      register: templates    - debug: var=templ_heartbeat.results -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" +  - name: Remove templates if heartbeat template is missing +    zbx_template: +      zbx_server: "{{ g_server }}" +      zbx_user: "{{ g_user }}" +      zbx_password: "{{ g_password }}" +      name: "{{ item }}"        state: absent      with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"      when:  templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml deleted file mode 100644 index 50fff53b2..000000000 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- debug: var=ctp_template - -- name: Create Template -  zbx_template: -    server: "{{ ctp_zserver }}" -    user: "{{ ctp_zuser }}" -    password: "{{ ctp_zpassword }}" -    name: "{{ ctp_template.name }}" -  register: ctp_created_template - -- debug: var=ctp_created_template - -#- name: Create Application -#  zbxapi: -#    server: "{{ ctp_zserver }}" -#    user: "{{ ctp_zuser }}" -#    password: "{{ ctp_zpassword }}" -#    zbx_class: Application -#    state: present -#    params: -#      name: "{{ ctp_template.application.name}}" -#      hostid: "{{ ctp_created_template.results[0].templateid }}" -#      search: -#        name: "{{ ctp_template.application.name}}" -#  register: ctp_created_application - -#- debug: var=ctp_created_application - -- name: Create Items -  zbx_item: -    server: "{{ ctp_zserver }}" -    user: "{{ ctp_zuser }}" -    password: "{{ ctp_zpassword }}" -    key: "{{ item.key }}" -    name: "{{ item.name | default(item.key, true) }}" -    value_type: "{{ item.value_type | default('int') }}" -    template_name: "{{ ctp_template.name }}" -  with_items: ctp_template.zitems -  register: ctp_created_items - -#- debug: var=ctp_created_items - -- name: Create Triggers -  zbx_trigger: -    server: "{{ ctp_zserver }}" -    user: "{{ ctp_zuser }}" -    password: "{{ ctp_zpassword }}" -    description: "{{ item.description }}" -    expression: "{{ item.expression }}" -    priority: "{{ item.priority }}" -  with_items: ctp_template.ztriggers -  when: ctp_template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins index 99a95e4ca..b0b7a3414 120000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -1 +1 @@ -../../../filter_plugins
\ No newline at end of file
+../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
new file mode 100755
index 000000000..0fe65b338
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
new file mode 100755
index 000000000..e2b8150c6
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -0,0 +1,13 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix
+  roles:
+  - role: os_zabbix
+    ozb_server: "{{ g_server }}"
+    ozb_user: "{{ g_user }}"
+    ozb_password: "{{ g_password }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
index e2b799b9d..20c4c58cf 120000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ b/playbooks/adhoc/zabbix_setup/roles
@@ -1 +1 @@
-../../../roles/
\ No newline at end of file
+../../../roles
\ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml deleted file mode 100644 index 1729194b5..000000000 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost -  gather_facts: no -  vars_files: -  - vars/template_heartbeat.yml -  - vars/template_os_linux.yml -  vars: -    g_zserver: http://localhost/zabbix/api_jsonrpc.php -    g_zuser: Admin -    g_zpassword: zabbix -  roles: -  - ../../../roles/os_zabbix -  post_tasks: -  - zbx_template: -      server: "{{ g_zserver }}" -      user: "{{ g_zuser }}" -      password: "{{ g_zpassword }}" -      state: list -    register: templates - -  - debug: var=templates - -  - name: Include Template -    include: create_template.yml -    vars: -      ctp_template: "{{ g_template_heartbeat }}" -      ctp_zserver: "{{ g_zserver }}" -      ctp_zuser: "{{ g_zuser }}" -      ctp_zpassword: "{{ g_zpassword }}" - -  - name: Include Template -    include: create_template.yml -    vars: -      ctp_template: "{{ g_template_os_linux }}" -      ctp_zserver: "{{ g_zserver }}" -      ctp_zuser: "{{ g_zuser }}" -      ctp_zpassword: "{{ g_zpassword }}" - diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml deleted file mode 100644 index 22cc75554..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -g_template_heartbeat: -  name: Template Heartbeat -  zitems: -  - name: Heartbeat Ping -    hostid: -    key: heartbeat.ping -  ztriggers: -  - description: 'Heartbeat.ping has failed on {HOST.NAME}' -    expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' -    priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: -  params: -    name: Template Host -    host: Template Host -    groups: -    - groupid: 1 # FIXME (not real) -    output: extend -    search: -      name: Template Host -  zitems: -  - name: Host Ping -    hostid:  -    key_: host.ping -    type: 2 -    value_type: 0 -    output: extend -    search: -      key_: host.ping -  ztriggers: -  - description: 'Host ping has failed on {HOST.NAME}' -    expression: '{Template Host:host.ping.last()}<>0' -    priority: 3 -    searchWildcardsEnabled: True -    search: -      description: 'Host ping has failed on*' -    expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: -  params: -    name: Template Master -    host: Template Master -    groups: -    - groupid: 1 # FIXME (not real) -    output: extend -    search: -      name: Template Master -  zitems: -  - name: Master Etcd Ping -    hostid:  -    key_: master.etcd.ping -    type: 2 -    value_type: 0 -    output: extend -    search: -      key_: master.etcd.ping -  ztriggers: -  - description: 'Master Etcd ping has failed on {HOST.NAME}' -    expression: '{Template Master:master.etcd.ping.last()}<>0' -    priority: 3 -    searchWildcardsEnabled: True -    search: -      description: 'Master Etcd ping 
has failed on*' -    expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_node: -  params: -    name: Template Node -    host: Template Node -    groups: -    - groupid: 1 # FIXME (not real) -    output: extend -    search: -      name: Template Node -  zitems: -  - name: Kubelet Ping -    hostid:  -    key_: kubelet.ping -    type: 2 -    value_type: 0 -    output: extend -    search: -      key_: kubelet.ping -  ztriggers: -  - description: 'Kubelet ping has failed on {HOST.NAME}' -    expression: '{Template Node:kubelet.ping.last()}<>0' -    priority: 3 -    searchWildcardsEnabled: True -    search: -      description: 'Kubelet ping has failed on*' -    expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml deleted file mode 100644 index 9cc038ffa..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -g_template_os_linux: -  name: Template OS Linux -  zitems: -  - key: kernel.uname.sysname -    value_type: string - -  - key: kernel.all.cpu.wait.total -    value_type: int - -  - key: kernel.all.cpu.irq.hard -    value_type: int - -  - key: kernel.all.cpu.idle -    value_type: int - -  - key: kernel.uname.distro -    value_type: string - -  - key: kernel.uname.nodename -    value_type: string - -  - key: kernel.all.cpu.irq.soft -    value_type: int - -  - key: kernel.all.load.15_minute -    value_type: float - -  - key: kernel.all.cpu.sys -    value_type: int - -  - key: kernel.all.load.5_minute -    value_type: float - -  - key: mem.freemem -    value_type: int - -  - key: kernel.all.cpu.nice -    value_type: int - -  - key: mem.util.bufmem -    value_type: int - -  - key: swap.used -    value_type: int - -  - key: kernel.all.load.1_minute -    value_type: float - -  - key: kernel.uname.version -    value_type: string - -  - key: swap.length -    value_type: int - -  - key: mem.physmem -    value_type: int - -  - key: kernel.all.uptime -    value_type: int - -  - key: swap.free -    value_type: int - -  - key: mem.util.used -    value_type: int - -  - key: kernel.all.cpu.user -    value_type: int - -  - key: kernel.uname.machine -    value_type: string - -  - key: hinv.ncpu -    value_type: int - -  - key: mem.util.cached -    value_type: int - -  - key: kernel.all.cpu.steal -    value_type: int - -  - key: kernel.all.pswitch -    value_type: int - -  - key: kernel.uname.release -    value_type: string - -  - key: proc.nprocs -    value_type: int diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: -  params: -    name: Template Router -    host: Template Router -    groups: -    - groupid: 1 # FIXME (not real) -    output: extend -    search: -      name: Template Router -  zitems: -  - name: Router Backends down -    hostid:  -    key_: router.backends.down -    type: 2 -    value_type: 0 -    output: extend -    search: -      key_: router.backends.down -  ztriggers: -  - description: 'Number of router backends down on {HOST.NAME}' -    expression: '{Template 
Router:router.backends.down.last()}<>0' -    priority: 3 -    searchWildcardsEnabled: True -    search: -      description: 'Number of router backends down on {HOST.NAME}' -    expandExpression: True  | 
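
For quick reference, the adhoc playbooks introduced or reworked by this merge are driven entirely by `ansible-playbook` extra-vars. The sketch below only collects the invocations documented in the playbook headers above; the tag names, cluster id, and inventory path are the example placeholders from those headers, and the repository-relative paths (which the headers leave implicit) are an assumption about running from the repo root.

```sh
# AWS credentials must be sourced first for the EBS-touching playbooks
# (docker_loopback_to_direct_lvm.yml, grow_docker_vg.yml):
export AWS_ACCESS_KEY_ID='XXXXX'
export AWS_SECRET_ACCESS_KEY='XXXXXX'

# Convert docker from loopback to direct-lvm on an AWS host (adds an EBS volume):
ansible-playbook -e 'cli_tag_name=ops-master-12345' -e 'cli_volume_size=30' \
    playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

# Clean up unwanted docker files on a node:
ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' \
    playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml

# Grow the docker volume group with a new EBS volume:
ansible-playbook -e 'cli_tag_name=ops-compute-12345' \
    playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

# Back the docker-registry with an S3 bucket named after the cluster:
ansible-playbook playbooks/adhoc/s3_registry/s3_registry.yml -e clusterid="mycluster"

# Technical-preview upgrade (re-runs installation steps; update your inventory first):
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
```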
