diff options
Diffstat (limited to 'roles')
42 files changed, 2553 insertions, 230 deletions
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index 241180e2c..3b80164cc 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -1,17 +1,16 @@  --- +- name: Load lib_openshift modules +  include_role: +    name: lib_openshift +  - name: Pull etcd system container    command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout" -- name: Check etcd system container package -  command: > -    atomic containers list --no-trunc -a -f container=etcd -  register: result -  - name: Set initial Etcd cluster    set_fact: -    etcd_initial_cluster: > +    etcd_initial_cluster: >-        {% for host in etcd_peers | default([]) -%}        {% if loop.last -%}        {{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }} @@ -20,44 +19,23 @@        {%- endif -%}        {% endfor -%} -- name: Update Etcd system container package -  command: > -    atomic containers update -    --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} -    --set ETCD_NAME={{ etcd_hostname }} -    --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }} -    --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} -    --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} -    --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} -    --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} -    --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} -    --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -    --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt -    --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key -    --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -    --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir 
}}/peer.crt -    --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key -    etcd -  when: -  - ("etcd" in result.stdout) - -- name: Install Etcd system container package -  command: > -    atomic install --system --name=etcd -    --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} -    --set ETCD_NAME={{ etcd_hostname }} -    --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }} -    --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} -    --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} -    --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} -    --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} -    --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} -    --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -    --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt -    --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key -    --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt -    --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt -    --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key -    {{ openshift.etcd.etcd_image }} -  when: -  - ("etcd" not in result.stdout) +- name: Install or Update Etcd system container package +  oc_atomic_container: +    name: etcd +    image: "{{ openshift.etcd.etcd_image }}" +    state: latest +    values: +      - ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} +      - ETCD_NAME={{ etcd_hostname }} +      - ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }} +      - ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} +      - ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} +      - ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} +      - ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} +      - ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} +      - 
ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +      - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt +      - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key +      - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +      - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt +      - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py index ced04bf3d..6a3543742 100644 --- a/roles/lib_openshift/library/oadm_manage_node.py +++ b/roles/lib_openshift/library/oadm_manage_node.py @@ -1015,13 +1015,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 0b4a019f3..5b1f417b8 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1023,13 +1023,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index 1575b023f..7908dd91e 100644 --- 
a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1119,13 +1119,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None @@ -2063,20 +2063,21 @@ class Volume(object):          ''' return a properly structured volume '''          volume_mount = None          volume = {'name': volume_info['name']} -        if volume_info['type'] == 'secret': +        volume_type = volume_info['type'].lower() +        if volume_type == 'secret':              volume['secret'] = {}              volume[volume_info['type']] = {'secretName': volume_info['secret_name']}              volume_mount = {'mountPath': volume_info['path'],                              'name': volume_info['name']} -        elif volume_info['type'] == 'emptydir': +        elif volume_type == 'emptydir':              volume['emptyDir'] = {}              volume_mount = {'mountPath': volume_info['path'],                              'name': volume_info['name']} -        elif volume_info['type'] == 'pvc': +        elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':              volume['persistentVolumeClaim'] = {}              volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']              volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize'] -        elif volume_info['type'] == 'hostpath': +        elif volume_type == 'hostpath':              volume['hostPath'] = {}              volume['hostPath']['path'] = volume_info['path'] diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 
52863b5f3..8bbe50ea3 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1144,13 +1144,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py new file mode 100644 index 000000000..d2620b4cc --- /dev/null +++ b/roles/lib_openshift/library/oc_atomic_container.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# -*- -*- -*- Begin included fragment: doc/atomic_container -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_atomic_container +short_description: Manage the container images on the atomic host platform +description: +    - Manage the container images on the atomic host platform +    - Allows to execute the commands on the container images +requirements: +  - atomic +  - "python >= 2.6" +options: +    name: +        description: +          - Name of the container +        required: True +        default: null +    image: +        description: +          - The image to use to install the container +        required: True +        default: null +    state: +        description: +          - State of the container +        required: True +        choices: ["latest", "absent", "latest", "rollback"] +        default: "latest" +    values: +        description: +          - Values for the installation of the container +        required: False +        default: None +''' + +# -*- -*- -*- End included fragment: doc/atomic_container -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*- + +# pylint: disable=wrong-import-position,too-many-branches,invalid-name +import json +from ansible.module_utils.basic import AnsibleModule + + +def _install(module, container, image, values_list): +    ''' install a container using atomic CLI.  values_list is the list of --set arguments. +    container is the name given to the container.  image is the image to use for the installation. ''' +    args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        return rc, out, err, False +    else: +        changed = "Extracting" in out +        return rc, out, err, changed + +def _uninstall(module, name): +    ''' uninstall an atomic container by its name. 
''' +    args = ['atomic', 'uninstall', name] +    rc, out, err = module.run_command(args, check_rc=False) +    return rc, out, err, False + + +def do_install(module, container, image, values_list): +    ''' install a container and exit the module. ''' +    rc, out, err, changed = _install(module, container, image, values_list) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        module.exit_json(msg=out, changed=changed) + + +def do_uninstall(module, name): +    ''' uninstall a container and exit the module. ''' +    rc, out, err, changed = _uninstall(module, name) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    module.exit_json(msg=out, changed=changed) + + +def do_update(module, container, old_image, image, values_list): +    ''' update a container and exit the module.  If the container uses a different +    image than the current installed one, then first uninstall the old one ''' + +    # the image we want is different than the installed one +    if old_image != image: +        rc, out, err, _ = _uninstall(module, container) +        if rc != 0: +            module.fail_json(rc=rc, msg=err) +        return do_install(module, container, image, values_list) + +    # if the image didn't change, use "atomic containers update" +    args = ['atomic', 'containers', 'update'] + values_list + [container] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        changed = "Extracting" in out +        module.exit_json(msg=out, changed=changed) + + +def do_rollback(module, name): +    ''' move to the previous deployment of the container, if present, and exit the module. 
''' +    args = ['atomic', 'containers', 'rollback', name] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        changed = "Rolling back" in out +        module.exit_json(msg=out, changed=changed) + + +def core(module): +    ''' entrypoint for the module. ''' +    name = module.params['name'] +    image = module.params['image'] +    values = module.params['values'] +    state = module.params['state'] + +    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') +    out = {} +    err = {} +    rc = 0 + +    values_list = ["--set=%s" % x for x in values] if values else [] + +    args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +        return + +    containers = json.loads(out) +    present = len(containers) > 0 +    old_image = containers[0]["image_name"] if present else None + +    if state == 'present' and present: +        module.exit_json(msg=out, changed=False) +    elif (state in ['latest', 'present']) and not present: +        do_install(module, name, image, values_list) +    elif state == 'latest': +        do_update(module, name, old_image, image, values_list) +    elif state == 'absent': +        if not present: +            module.exit_json(msg="", changed=False) +        else: +            do_uninstall(module, name) +    elif state == 'rollback': +        do_rollback(module, name) + + +def main(): +    module = AnsibleModule( +        argument_spec=dict( +            name=dict(default=None, required=True), +            image=dict(default=None, required=True), +            state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), +            values=dict(type='list', default=[]), +            ), +        ) + +    # Verify that the platform supports atomic command + 
   rc, _, err = module.run_command('atomic -v', check_rc=False) +    if rc != 0: +        module.fail_json(msg="Error in running atomic command", err=err) + +    try: +        core(module) +    except Exception as e:  # pylint: disable=broad-except +        module.fail_json(msg=str(e)) + + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_atomic_container.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 8901042ac..36d9394e1 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1043,13 +1043,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index a84b9f074..65b2bf860 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1010,13 +1010,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 7a4d6959a..ad2b5b5ed 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1019,13 +1019,13 @@ class 
OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 0f56ce983..68e882167 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1022,13 +1022,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 4f53bb5d6..bf4a08c52 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1011,13 +1011,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index a2cbd9b93..9d95fcdbb 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1053,13 
+1053,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index a7e3e111e..17e2d87c3 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -997,13 +997,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_sdnvalidator.py b/roles/lib_openshift/library/oc_sdnvalidator.py new file mode 100644 index 000000000..770be29d4 --- /dev/null +++ b/roles/lib_openshift/library/oc_sdnvalidator.py @@ -0,0 +1,1387 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +import atexit +import copy +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +try: +    import ruamel.yaml as yaml +except ImportError: +    import yaml + +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/sdnvalidator -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_sdnvalidator +short_description: Validate SDN objects +description: +  - Validate SDN objects +options: +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +author: +- "Mo Khan <monis@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +oc_version: +- name: get oc sdnvalidator +  sdnvalidator: +  register: oc_sdnvalidator +''' + +# -*- -*- -*- End included fragment: doc/sdnvalidator -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- +# pylint: disable=undefined-variable,missing-docstring +# noqa: E301,E302 + + +class YeditException(Exception): +    ''' 
Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object): +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @separator.setter +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' +        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key % ''.join(common_separators), key) + +    @staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +       
 ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not 
isinstance(data, dict): +                    return None + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. +        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes 
if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. %s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = 
None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense 
due to loading data from +        # a serialized format. +        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put 
path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod + 
   def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. 
value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  Verify that the ' + +                               'file exists, that it is has correct' + +                               ' permissions, and is valid yaml.'} + +        if module.params['state'] == 'list': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['key']: +                rval = yamlfile.get(module.params['key']) or {} + +            return {'changed': False, 'result': rval, 'state': "list"} + +        elif module.params['state'] == 'absent': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['update']: +                rval = yamlfile.pop(module.params['key'], +                                    module.params['value']) +            else: +                rval = yamlfile.delete(module.params['key']) + +            if rval[0] and module.params['src']: +                yamlfile.write() + +            
return {'changed': rval[0], 'result': rval[1], 'state': "absent"} + +        elif module.params['state'] == 'present': +            # check if content is different than what is in the file +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) + +                # We had no edits to make and the contents are the same +                if yamlfile.yaml_dict == content and \ +                   module.params['value'] is None: +                    return {'changed': False, +                            'result': yamlfile.yaml_dict, +                            'state': "present"} + +                yamlfile.yaml_dict = content + +            # we were passed a value; parse it +            if module.params['value']: +                value = Yedit.parse_value(module.params['value'], +                                          module.params['value_type']) +                key = module.params['key'] +                if module.params['update']: +                    # pylint: disable=line-too-long +                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501 +                                                      module.params['curr_value_format'])  # noqa: E501 + +                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501 + +                elif module.params['append']: +                    rval = yamlfile.append(key, value) +                else: +                    rval = yamlfile.put(key, value) + +                if rval[0] and module.params['src']: +                    yamlfile.write() + +                return {'changed': rval[0], +                        'result': rval[1], 'state': "present"} + +            # no edits to make +            if module.params['src']: +                # pylint: disable=redefined-variable-type +                
rval = yamlfile.write() +                return {'changed': rval[0], +                        'result': rval[1], +                        'state': "present"} + +        return {'failed': True, 'msg': 'Unkown state passed'} + +# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +    
    self.oc_binary = locate_oc_binary() + +    # Pylint allows only 5 arguments to be passed. +    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the 
template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def 
_list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        
cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. ''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout.decode(), stderr.decode() + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [self.oc_binary] + +        if oadm: +            cmds.append('adm') + +        cmds.extend(cmd) + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        rval = {} +        results = '' +        err = None + +        if self.verbose: +            print(' '.join(cmds)) + +        try: +            returncode, stdout, stderr = self._run(cmds, input_data) +        except OSError as ex: +            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) + +        rval = {"returncode": returncode, +                "results": results, +                "cmd": ' '.join(cmds)} + +        if returncode == 0: +            if output: +                if output_type == 'json': +                    try: +                        rval['results'] = json.loads(stdout) +                    except ValueError as err: +                        if "No JSON object could be decoded" in err.args: +         
                   err = err.args +                elif output_type == 'raw': +                    rval['results'] = stdout + +            if self.verbose: +                print("STDOUT: {0}".format(stdout)) +                print("STDERR: {0}".format(stderr)) + +            if err: +                rval.update({"err": err, +                             "stderr": stderr, +                             "stdout": stdout, +                             "cmd": cmds}) + +        else: +            rval.update({"stderr": stderr, +                         "stdout": stdout, +                         "results": {}}) + +        return rval + + +class Utils(object): +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        
# Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        
contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        
yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  ''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +      
                      print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(value) +                            print(user_def[key]) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(api_values) +                        print(user_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                   
 if debug: +                        print("value not equal; user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key, data in self.config_options.items(): +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_sdnvalidator.py -*- -*- -*- + +# pylint: disable=too-many-instance-attributes +class OCSDNValidator(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    def __init__(self, kubeconfig): +        ''' Constructor for OCSDNValidator ''' +        # namespace has no meaning for SDN validation, hardcode to 'default' +        super(OCSDNValidator, self).__init__('default', kubeconfig) + +    def get(self, kind, invalid_filter): +        ''' return SDN information ''' + +        rval = self._get(kind) +        if rval['returncode'] != 0: +            return False, rval, [] + +        return True, rval, 
filter(invalid_filter, rval['results'][0]['items']) + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params): +        ''' run the idempotent ansible code + +            params comes from the ansible portion of this module +        ''' + +        sdnvalidator = OCSDNValidator(params['kubeconfig']) +        all_invalid = {} +        failed = False + +        checks = ( +            ( +                'hostsubnet', +                lambda x: x['metadata']['name'] != x['host'], +                u'hostsubnets where metadata.name != host', +            ), +            ( +                'netnamespace', +                lambda x: x['metadata']['name'] != x['netname'], +                u'netnamespaces where metadata.name != netname', +            ), +        ) + +        for resource, invalid_filter, invalid_msg in checks: +            success, rval, invalid = sdnvalidator.get(resource, invalid_filter) +            if not success: +                return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval} +            if invalid: +                failed = True +                all_invalid[invalid_msg] = invalid + +        if failed: +            return {'failed': True, 'msg': 'All SDN objects are not valid.', 'state': 'list', 'results': all_invalid} + +        return {'msg': 'All SDN objects are valid.'} + +# -*- -*- -*- End included fragment: class/oc_sdnvalidator.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_sdnvalidator.py -*- -*- -*- + +def main(): +    ''' +    ansible oc module for validating OpenShift SDN objects +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +        ), +        supports_check_mode=False, +    ) + + +    rval = OCSDNValidator.run_ansible(module.params) +    if 'failed' in rval: +        module.fail_json(**rval) + +    
module.exit_json(**rval) + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_sdnvalidator.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index db9a3a7ec..bf1f788eb 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1043,13 +1043,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index c8d4b3040..c789bbdac 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1049,13 +1049,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 3e650b5f2..2d485aec2 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -995,13 +995,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is 
not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 749cf2d8e..0aa4d9017 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -995,13 +995,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index e9b970967..13b4504c7 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -967,13 +967,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py new file mode 100644 index 000000000..20d75cb63 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py @@ -0,0 +1,137 @@ +# pylint: skip-file +# flake8: noqa + +# pylint: disable=wrong-import-position,too-many-branches,invalid-name 
+import json +from ansible.module_utils.basic import AnsibleModule + + +def _install(module, container, image, values_list): +    ''' install a container using atomic CLI.  values_list is the list of --set arguments. +    container is the name given to the container.  image is the image to use for the installation. ''' +    args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        return rc, out, err, False +    else: +        changed = "Extracting" in out +        return rc, out, err, changed + +def _uninstall(module, name): +    ''' uninstall an atomic container by its name. ''' +    args = ['atomic', 'uninstall', name] +    rc, out, err = module.run_command(args, check_rc=False) +    return rc, out, err, False + + +def do_install(module, container, image, values_list): +    ''' install a container and exit the module. ''' +    rc, out, err, changed = _install(module, container, image, values_list) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        module.exit_json(msg=out, changed=changed) + + +def do_uninstall(module, name): +    ''' uninstall a container and exit the module. ''' +    rc, out, err, changed = _uninstall(module, name) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    module.exit_json(msg=out, changed=changed) + + +def do_update(module, container, old_image, image, values_list): +    ''' update a container and exit the module.  
If the container uses a different +    image than the current installed one, then first uninstall the old one ''' + +    # the image we want is different than the installed one +    if old_image != image: +        rc, out, err, _ = _uninstall(module, container) +        if rc != 0: +            module.fail_json(rc=rc, msg=err) +        return do_install(module, container, image, values_list) + +    # if the image didn't change, use "atomic containers update" +    args = ['atomic', 'containers', 'update'] + values_list + [container] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        changed = "Extracting" in out +        module.exit_json(msg=out, changed=changed) + + +def do_rollback(module, name): +    ''' move to the previous deployment of the container, if present, and exit the module. ''' +    args = ['atomic', 'containers', 'rollback', name] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +    else: +        changed = "Rolling back" in out +        module.exit_json(msg=out, changed=changed) + + +def core(module): +    ''' entrypoint for the module. 
''' +    name = module.params['name'] +    image = module.params['image'] +    values = module.params['values'] +    state = module.params['state'] + +    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') +    out = {} +    err = {} +    rc = 0 + +    values_list = ["--set=%s" % x for x in values] if values else [] + +    args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name] +    rc, out, err = module.run_command(args, check_rc=False) +    if rc != 0: +        module.fail_json(rc=rc, msg=err) +        return + +    containers = json.loads(out) +    present = len(containers) > 0 +    old_image = containers[0]["image_name"] if present else None + +    if state == 'present' and present: +        module.exit_json(msg=out, changed=False) +    elif (state in ['latest', 'present']) and not present: +        do_install(module, name, image, values_list) +    elif state == 'latest': +        do_update(module, name, old_image, image, values_list) +    elif state == 'absent': +        if not present: +            module.exit_json(msg="", changed=False) +        else: +            do_uninstall(module, name) +    elif state == 'rollback': +        do_rollback(module, name) + + +def main(): +    module = AnsibleModule( +        argument_spec=dict( +            name=dict(default=None, required=True), +            image=dict(default=None, required=True), +            state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']), +            values=dict(type='list', default=[]), +            ), +        ) + +    # Verify that the platform supports atomic command +    rc, _, err = module.run_command('atomic -v', check_rc=False) +    if rc != 0: +        module.fail_json(msg="Error in running atomic command", err=err) + +    try: +        core(module) +    except Exception as e:  # pylint: disable=broad-except +        module.fail_json(msg=str(e)) + + +if __name__ == '__main__': +    main() diff --git 
a/roles/lib_openshift/src/ansible/oc_sdnvalidator.py b/roles/lib_openshift/src/ansible/oc_sdnvalidator.py new file mode 100644 index 000000000..e91417d63 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_sdnvalidator.py @@ -0,0 +1,24 @@ +# pylint: skip-file +# flake8: noqa + +def main(): +    ''' +    ansible oc module for validating OpenShift SDN objects +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +        ), +        supports_check_mode=False, +    ) + + +    rval = OCSDNValidator.run_ansible(module.params) +    if 'failed' in rval: +        module.fail_json(**rval) + +    module.exit_json(**rval) + +if __name__ == '__main__': +    main() diff --git a/roles/lib_openshift/src/class/oc_sdnvalidator.py b/roles/lib_openshift/src/class/oc_sdnvalidator.py new file mode 100644 index 000000000..da923337b --- /dev/null +++ b/roles/lib_openshift/src/class/oc_sdnvalidator.py @@ -0,0 +1,58 @@ +# pylint: skip-file +# flake8: noqa + +# pylint: disable=too-many-instance-attributes +class OCSDNValidator(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    def __init__(self, kubeconfig): +        ''' Constructor for OCSDNValidator ''' +        # namespace has no meaning for SDN validation, hardcode to 'default' +        super(OCSDNValidator, self).__init__('default', kubeconfig) + +    def get(self, kind, invalid_filter): +        ''' return SDN information ''' + +        rval = self._get(kind) +        if rval['returncode'] != 0: +            return False, rval, [] + +        return True, rval, filter(invalid_filter, rval['results'][0]['items']) + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params): +        ''' run the idempotent ansible code + +            params comes from the ansible portion of this module +        ''' + +        sdnvalidator = OCSDNValidator(params['kubeconfig']) +        
all_invalid = {} +        failed = False + +        checks = ( +            ( +                'hostsubnet', +                lambda x: x['metadata']['name'] != x['host'], +                u'hostsubnets where metadata.name != host', +            ), +            ( +                'netnamespace', +                lambda x: x['metadata']['name'] != x['netname'], +                u'netnamespaces where metadata.name != netname', +            ), +        ) + +        for resource, invalid_filter, invalid_msg in checks: +            success, rval, invalid = sdnvalidator.get(resource, invalid_filter) +            if not success: +                return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval} +            if invalid: +                failed = True +                all_invalid[invalid_msg] = invalid + +        if failed: +            return {'failed': True, 'msg': 'All SDN objects are not valid.', 'state': 'list', 'results': all_invalid} + +        return {'msg': 'All SDN objects are valid.'} diff --git a/roles/lib_openshift/src/doc/atomic_container b/roles/lib_openshift/src/doc/atomic_container new file mode 100644 index 000000000..53fc40f36 --- /dev/null +++ b/roles/lib_openshift/src/doc/atomic_container @@ -0,0 +1,36 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_atomic_container +short_description: Manage the container images on the atomic host platform +description: +    - Manage the container images on the atomic host platform +    - Allows to execute the commands on the container images +requirements: +  - atomic +  - "python >= 2.6" +options: +    name: +        description: +          - Name of the container +        required: True +        default: null +    image: +        description: +          - The image to use to install the container +        required: True +        default: null +    state: +        description: +          - State of the container +        required: 
True +        choices: ["present", "absent", "latest", "rollback"] +        default: "latest" +    values: +        description: +          - Values for the installation of the container +        required: False +        default: None +''' diff --git a/roles/lib_openshift/src/doc/sdnvalidator b/roles/lib_openshift/src/doc/sdnvalidator new file mode 100644 index 000000000..0b1862ed1 --- /dev/null +++ b/roles/lib_openshift/src/doc/sdnvalidator @@ -0,0 +1,27 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_sdnvalidator +short_description: Validate SDN objects +description: +  - Validate SDN objects +options: +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +author: +- "Mo Khan <monis@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +oc_sdnvalidator: +- name: get oc sdnvalidator +  sdnvalidator: +  register: oc_sdnvalidator +''' diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 2e822d8ef..53b9f9947 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -255,13 +255,13 @@ class OpenShiftCLI(object):          if oadm:              cmds.append('adm') +        cmds.extend(cmd) +          if self.all_namespaces:              cmds.extend(['--all-namespaces'])          elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501              cmds.extend(['-n', self.namespace]) -        cmds.extend(cmd) -          rval = {}          results = ''          err = None diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py index 84ef1f705..e0abb1d1b 100644 --- a/roles/lib_openshift/src/lib/volume.py +++ b/roles/lib_openshift/src/lib/volume.py @@ -17,20 +17,21 @@ class Volume(object):          ''' return a properly structured volume '''
volume_mount = None          volume = {'name': volume_info['name']} -        if volume_info['type'] == 'secret': +        volume_type = volume_info['type'].lower() +        if volume_type == 'secret':              volume['secret'] = {}              volume[volume_info['type']] = {'secretName': volume_info['secret_name']}              volume_mount = {'mountPath': volume_info['path'],                              'name': volume_info['name']} -        elif volume_info['type'] == 'emptydir': +        elif volume_type == 'emptydir':              volume['emptyDir'] = {}              volume_mount = {'mountPath': volume_info['path'],                              'name': volume_info['name']} -        elif volume_info['type'] == 'pvc': +        elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':              volume['persistentVolumeClaim'] = {}              volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']              volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize'] -        elif volume_info['type'] == 'hostpath': +        elif volume_type == 'hostpath':              volume['hostPath'] = {}              volume['hostPath']['path'] = volume_info['path'] diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml index 35f8d71b2..a48fdf0c2 100644 --- a/roles/lib_openshift/src/sources.yml +++ b/roles/lib_openshift/src/sources.yml @@ -49,6 +49,12 @@ oc_adm_router.py:  - class/oc_adm_router.py  - ansible/oc_adm_router.py +oc_atomic_container.py: +- doc/generated +- doc/license +- doc/atomic_container +- ansible/oc_atomic_container.py +  oc_edit.py:  - doc/generated  - doc/license @@ -176,3 +182,13 @@ oc_version.py:  - lib/base.py  - class/oc_version.py  - ansible/oc_version.py + +oc_sdnvalidator.py: +- doc/generated +- doc/license +- lib/import.py +- doc/sdnvalidator +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- class/oc_sdnvalidator.py +- ansible/oc_sdnvalidator.py diff --git 
a/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py b/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py new file mode 100755 index 000000000..49e2aadb2 --- /dev/null +++ b/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py @@ -0,0 +1,481 @@ +#!/usr/bin/env python2 +''' + Unit tests for oc sdnvalidator +''' +# To run +# ./oc_sdnvalidator.py +# +# .... +# ---------------------------------------------------------------------- +# Ran 4 tests in 0.002s +# +# OK + +import os +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_sdnvalidator import OCSDNValidator  # noqa: E402 + + +class OCSDNValidatorTest(unittest.TestCase): +    ''' +     Test class for OCSDNValidator +    ''' + +    @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy') +    @mock.patch('oc_sdnvalidator.OCSDNValidator._run') +    def test_no_data(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing when both SDN objects are empty ''' + +        # Arrange + +        # run_ansible input parameters +        params = { +            'kubeconfig': '/etc/origin/master/admin.kubeconfig', +        } + +        empty = '''{ +    "apiVersion": "v1", +    "items": [], +    "kind": "List", +    "metadata": {}, +    "resourceVersion": "", +    "selfLink": "" +}''' + +        # Return values of our mocked function call. These get returned once per call. 
+        mock_cmd.side_effect = [ +            # First call to mock +            (0, empty, ''), + +            # Second call to mock +            (0, empty, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        # Act +        results = OCSDNValidator.run_ansible(params) + +        # Assert +        self.assertNotIn('failed', results) +        self.assertEqual(results['msg'], 'All SDN objects are valid.') + +        # Making sure our mock was called as we expected +        mock_cmd.assert_has_calls([ +            mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None), +            mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None), +        ]) + +    @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy') +    @mock.patch('oc_sdnvalidator.OCSDNValidator._run') +    def test_error_code(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing when both we fail to get SDN objects ''' + +        # Arrange + +        # run_ansible input parameters +        params = { +            'kubeconfig': '/etc/origin/master/admin.kubeconfig', +        } + +        # Return values of our mocked function call. These get returned once per call. 
+        mock_cmd.side_effect = [ +            # First call to mock +            (1, '', 'Error.'), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        error_results = { +            'returncode': 1, +            'stderr': 'Error.', +            'stdout': '', +            'cmd': 'oc -n default get hostsubnet -o json', +            'results': [{}] +        } + +        # Act +        results = OCSDNValidator.run_ansible(params) + +        # Assert +        self.assertTrue(results['failed']) +        self.assertEqual(results['msg'], 'Failed to GET hostsubnet.') +        self.assertEqual(results['state'], 'list') +        self.assertEqual(results['results'], error_results) + +        # Making sure our mock was called as we expected +        mock_cmd.assert_has_calls([ +            mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None), +        ]) + +    @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy') +    @mock.patch('oc_sdnvalidator.OCSDNValidator._run') +    def test_valid_both(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing when both SDN objects are valid ''' + +        # Arrange + +        # run_ansible input parameters +        params = { +            'kubeconfig': '/etc/origin/master/admin.kubeconfig', +        } + +        valid_hostsubnet = '''{ +    "apiVersion": "v1", +    "items": [ +        { +            "apiVersion": "v1", +            "host": "bar0", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:09Z", +                "name": "bar0", +                "namespace": "", +                "resourceVersion": "986", +                "selfLink": "/oapi/v1/hostsubnetsbar0", +                "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        }, +        { +            "apiVersion": "v1", +            
"host": "bar1", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:18Z", +                "name": "bar1", +                "namespace": "", +                "resourceVersion": "988", +                "selfLink": "/oapi/v1/hostsubnetsbar1", +                "uid": "57710d84-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        }, +        { +            "apiVersion": "v1", +            "host": "bar2", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:26Z", +                "name": "bar2", +                "namespace": "", +                "resourceVersion": "991", +                "selfLink": "/oapi/v1/hostsubnetsbar2", +                "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        } +    ], +    "kind": "List", +    "metadata": {}, +    "resourceVersion": "", +    "selfLink": "" +    }''' + +        valid_netnamespace = '''{ +    "apiVersion": "v1", +    "items": [ +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:16Z", +                "name": "foo0", +                "namespace": "", +                "resourceVersion": "959", +                "selfLink": "/oapi/v1/netnamespacesfoo0", +                "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo0" +        }, +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:26Z", +                "name": "foo1", +                "namespace": "", +                "resourceVersion": "962", +                "selfLink": 
"/oapi/v1/netnamespacesfoo1", +                "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo1" +        }, +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:36Z", +                "name": "foo2", +                "namespace": "", +                "resourceVersion": "965", +                "selfLink": "/oapi/v1/netnamespacesfoo2", +                "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo2" +        } +    ], +    "kind": "List", +    "metadata": {}, +    "resourceVersion": "", +    "selfLink": "" +    }''' + +        # Return values of our mocked function call. These get returned once per call. +        mock_cmd.side_effect = [ +            # First call to mock +            (0, valid_hostsubnet, ''), + +            # Second call to mock +            (0, valid_netnamespace, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        # Act +        results = OCSDNValidator.run_ansible(params) + +        # Assert +        self.assertNotIn('failed', results) +        self.assertEqual(results['msg'], 'All SDN objects are valid.') + +        # Making sure our mock was called as we expected +        mock_cmd.assert_has_calls([ +            mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None), +            mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None), +        ]) + +    @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy') +    @mock.patch('oc_sdnvalidator.OCSDNValidator._run') +    def test_invalid_both(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing when both SDN objects are invalid ''' + +        # Arrange + +        # run_ansible input parameters +        params = { +            
'kubeconfig': '/etc/origin/master/admin.kubeconfig', +        } + +        invalid_hostsubnet = '''{ +    "apiVersion": "v1", +    "items": [ +        { +            "apiVersion": "v1", +            "host": "bar0", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:09Z", +                "name": "bar0", +                "namespace": "", +                "resourceVersion": "986", +                "selfLink": "/oapi/v1/hostsubnetsbar0", +                "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        }, +        { +            "apiVersion": "v1", +            "host": "bar1", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:18Z", +                "name": "bar1", +                "namespace": "", +                "resourceVersion": "988", +                "selfLink": "/oapi/v1/hostsubnetsbar1", +                "uid": "57710d84-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        }, +        { +            "apiVersion": "v1", +            "host": "bar2", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:26Z", +                "name": "bar2", +                "namespace": "", +                "resourceVersion": "991", +                "selfLink": "/oapi/v1/hostsubnetsbar2", +                "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        }, +        { +            "apiVersion": "v1", +            "host": "baz1", +            "hostIP": "1.1.1.1", +            "kind": "HostSubnet", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:47:49Z", +                "name": "baz0", +                
"namespace": "", +                "resourceVersion": "996", +                "selfLink": "/oapi/v1/hostsubnetsbaz0", +                "uid": "69f75f87-f478-11e6-aae0-507b9dac97ff" +            }, +            "subnet": "1.1.0.0/24" +        } +    ], +    "kind": "List", +    "metadata": {}, +    "resourceVersion": "", +    "selfLink": "" +}''' + +        invalid_netnamespace = '''{ +    "apiVersion": "v1", +    "items": [ +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:52Z", +                "name": "bar0", +                "namespace": "", +                "resourceVersion": "969", +                "selfLink": "/oapi/v1/netnamespacesbar0", +                "uid": "245d416e-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "bar1" +        }, +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:16Z", +                "name": "foo0", +                "namespace": "", +                "resourceVersion": "959", +                "selfLink": "/oapi/v1/netnamespacesfoo0", +                "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo0" +        }, +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { +                "creationTimestamp": "2017-02-16T18:45:26Z", +                "name": "foo1", +                "namespace": "", +                "resourceVersion": "962", +                "selfLink": "/oapi/v1/netnamespacesfoo1", +                "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo1" +        }, +        { +            "apiVersion": "v1", +            "kind": "NetNamespace", +            "metadata": { 
+                "creationTimestamp": "2017-02-16T18:45:36Z", +                "name": "foo2", +                "namespace": "", +                "resourceVersion": "965", +                "selfLink": "/oapi/v1/netnamespacesfoo2", +                "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff" +            }, +            "netid": 100, +            "netname": "foo2" +        } +    ], +    "kind": "List", +    "metadata": {}, +    "resourceVersion": "", +    "selfLink": "" +}''' + +        invalid_results = { +            'hostsubnets where metadata.name != host': [{ +                'apiVersion': 'v1', +                'host': 'baz1', +                'hostIP': '1.1.1.1', +                'kind': 'HostSubnet', +                'metadata': { +                    'creationTimestamp': '2017-02-16T18:47:49Z', +                    'name': 'baz0', +                    'namespace': '', +                    'resourceVersion': '996', +                    'selfLink': '/oapi/v1/hostsubnetsbaz0', +                    'uid': '69f75f87-f478-11e6-aae0-507b9dac97ff' +                }, +                'subnet': '1.1.0.0/24' +            }], +            'netnamespaces where metadata.name != netname': [{ +                'apiVersion': 'v1', +                'kind': 'NetNamespace', +                'metadata': { +                    'creationTimestamp': '2017-02-16T18:45:52Z', +                    'name': 'bar0', +                    'namespace': '', +                    'resourceVersion': '969', +                    'selfLink': '/oapi/v1/netnamespacesbar0', +                    'uid': '245d416e-f478-11e6-aae0-507b9dac97ff' +                }, +                'netid': 100, +                'netname': 'bar1' +            }], +        } + +        # Return values of our mocked function call. These get returned once per call. 
+        mock_cmd.side_effect = [ +            # First call to mock +            (0, invalid_hostsubnet, ''), + +            # Second call to mock +            (0, invalid_netnamespace, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        # Act +        results = OCSDNValidator.run_ansible(params) + +        # Assert +        self.assertTrue(results['failed']) +        self.assertEqual(results['msg'], 'All SDN objects are not valid.') +        self.assertEqual(results['state'], 'list') +        self.assertEqual(results['results'], invalid_results) + +        # Making sure our mock was called as we expected +        mock_cmd.assert_has_calls([ +            mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None), +            mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None), +        ]) + + +if __name__ == '__main__': +    unittest.main() diff --git a/roles/lib_openshift/src/test/unit/test_oc_env.py b/roles/lib_openshift/src/test/unit/test_oc_env.py index dab5099c2..45a3ef1eb 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_env.py +++ b/roles/lib_openshift/src/test/unit/test_oc_env.py @@ -147,7 +147,7 @@ class OCEnvTest(unittest.TestCase):          # Making sure our mocks were called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None), +            mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),          ])      @mock.patch('oc_env.locate_oc_binary') @@ -333,7 +333,7 @@ class OCEnvTest(unittest.TestCase):          # Making sure our mocks were called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None), +            mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),          ])      
@mock.patch('oc_env.locate_oc_binary') @@ -448,7 +448,7 @@ class OCEnvTest(unittest.TestCase):          # Making sure our mocks were called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None), +            mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),          ])      @unittest.skipIf(six.PY3, 'py2 test only') diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py index ea94bfabd..e0f6d2f3c 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_route.py +++ b/roles/lib_openshift/src/test/unit/test_oc_route.py @@ -135,7 +135,7 @@ class OCRouteTest(unittest.TestCase):          # Making sure our mock was called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None), +            mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),          ])      @mock.patch('oc_route.locate_oc_binary') @@ -265,9 +265,9 @@ metadata:          # Making sure our mock was called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None), -            mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None), +            mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),          ])      @unittest.skipIf(six.PY3, 'py2 test only') diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py index 087c62dcf..bf496769a 100755 --- 
a/roles/lib_openshift/src/test/unit/test_oc_secret.py +++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py @@ -85,8 +85,8 @@ class OCSecretTest(unittest.TestCase):          # Making sure our mock was called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'secrets', 'testsecretname', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'secrets', 'new', 'testsecretname', mock.ANY], None), +            mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),          ])          mock_write.assert_has_calls([ diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py index b02b37053..3572a6728 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py +++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py @@ -111,9 +111,9 @@ class OCServiceAccountTest(unittest.TestCase):          # Making sure our mock was called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None), -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None), +            mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None),          ])      @unittest.skipIf(six.PY3, 'py2 test only') diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py index 
ab8ccd18c..d78349e34 100755 --- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py +++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py @@ -181,10 +181,10 @@ secrets:          # Making sure our mocks were called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None), -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None) +            mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None)          ])          yaml_file = builder_pyyaml_file @@ -304,9 +304,9 @@ secrets:          # Making sure our mocks were called as we expected          mock_cmd.assert_has_calls([ -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None), -            mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None), +            mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),          ])          yaml_file = builder_pyyaml_file diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/filters.py new file mode 100644 index 000000000..cbfadfe9d --- /dev/null +++ 
b/roles/openshift_hosted/filter_plugins/filters.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +Custom filters for use in openshift_hosted +''' + + +class FilterModule(object): +    ''' Custom ansible filters for use by openshift_hosted role''' + +    @staticmethod +    def get_router_replicas(replicas=None, router_nodes=None): +        ''' This function will return the number of replicas +            based on the results from the defined +            openshift.hosted.router.replicas OR +            the query from oc_obj on openshift nodes with a selector OR +            default to 1 + +        ''' +        # We always use what they've specified if they've specified a value +        if replicas is not None: +            return replicas + +        if (isinstance(router_nodes, dict) and +                'results' in router_nodes and +                'results' in router_nodes['results'] and +                'items' in router_nodes['results']['results']): + +            return len(router_nodes['results']['results'][0]['items']) + +        return 1 + +    def filters(self): +        ''' returns a mapping of filters to methods ''' +        return {'get_router_replicas': self.get_router_replicas} diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index cad5c666c..dcb1277f3 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -53,6 +53,19 @@      openshift_hosted_registry_force:      - False +- name: create the default registry service +  oc_service: +    namespace: "{{ openshift_hosted_registry_namespace }}" +    name: "{{ openshift_hosted_registry_name }}" +    ports: +    - name: 5000-tcp +      port: 5000 +      protocol: TCP +      targetPort: 5000 +    selector: "{{ openshift_hosted_registry_selector }}" +    session_affinity: ClientIP +    service_type: ClusterIP +  - include: secure.yml    static: no    
run_once: true @@ -64,13 +77,14 @@    when:    - openshift.hosted.registry.storage.kind | default(none) == 'object' -- name: Set facts for persistent volume +- name: Update openshift_hosted facts for persistent volumes    set_fact: +    openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(pvc_volume_mounts) }}" +  vars:      pvc_volume_mounts:      - name: registry-storage        type: persistentVolumeClaim        claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" -    openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(pvc_volume_mounts) }}"    when:    - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack'] diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml index 69b91be0b..26f921f15 100644 --- a/roles/openshift_hosted/tasks/registry/storage/s3.yml +++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml @@ -36,13 +36,14 @@        - path: cloudfront.pem          data: "{{ lookup('file', openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile) }}" -  - name: Add cloudfront secret to the registry volumes +  - name: Append cloudfront secret registry volume to openshift_hosted_registry_volumes      set_fact: +      openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(s3_volume_mount) }}" +    vars:        s3_volume_mount:        - name: cloudfront-vol          path: /etc/origin          type: secret          secret_name: docker-registry-s3-cloudfront -      openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(s3_volume_mount) }}"    when: openshift_hosted_registry_storage_s3_cloudfront_baseurl | default(none) is not none diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 3535d5f5e..607ace7f9 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ 
b/roles/openshift_hosted/tasks/router/router.yml @@ -10,7 +10,7 @@  - name: set_fact replicas    set_fact: -    replicas: "{{ openshift.hosted.router.replicas | default(router_nodes.results.results[0]['items'] | length) }}" +    replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"  - block: diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index a0e1ac75e..1b3e0dba1 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -1,4 +1,8 @@  --- +- name: Load lib_openshift modules +  include_role: +    name: lib_openshift +  - name: Pre-pull master system container image    command: >      atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} @@ -10,68 +14,31 @@      atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master    register: result -- name: Update Master system container package -  command: > -    atomic containers update {{ openshift.common.service_type }}-master -  register: update_result -  changed_when: "'Extracting' in update_result.stdout" -  when: -    - ("master" in result.stdout) -    - l_is_same_version -    - not l_is_ha - -- name: Uninstall Master system container package -  command: > -    atomic uninstall {{ openshift.common.service_type }}-master -  failed_when: False -  when: -    - ("master" in result.stdout) -    - not l_is_same_version -    - not l_is_ha - -- name: Install Master system container package -  command: > -    atomic install --system --name={{ openshift.common.service_type }}-master {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} +- name: Install or Update master system container +  oc_atomic_container: +    name: "{{ openshift.common.service_type }}-master" +    
image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    state: latest    when: -    - not l_is_same_version or ("master" not in result.stdout) | bool      - not l_is_ha -  notify: -    - restart master  # HA -- name: Update Master HA system container package -  command: > -    atomic containers update {{ openshift.common.service_type }}-master-{{ item }} -  register: update_result -  changed_when: "'Extracting' in update_result.stdout" -  with_items: -    - api -    - controllers -  when: -    - ("master" in result.stdout) -    - l_is_same_version -    - l_is_ha - -- name: Uninstall Master HA system container package -  command: > -    atomic uninstall {{ openshift.common.service_type }}-master-{{ item }} -  failed_when: False -  with_items: -    - api -    - controllers +- name: Install or Update HA api master system container +  oc_atomic_container: +    name: "{{ openshift.common.service_type }}-master-api" +    image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    state: latest +    values: +      - COMMAND=api    when: -    - ("master" in result.stdout) -    - not l_is_same_version      - l_is_ha -- name: Install Master HA system container package -  command: > -    atomic install --system --set COMMAND={{ item }} --name={{ openshift.common.service_type }}-master-{{ item }} {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} -  with_items: -    - api -    - controllers +- name: Install or Update HA controller master system container +  oc_atomic_container: +    name: "{{ openshift.common.service_type }}-master-controllers" +    image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    state: latest +    values: +      - COMMAND=controllers    when: -    - not l_is_same_version or 
("master" not in result.stdout) | bool      - l_is_ha -  notify: -    - restart master diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index 1e157097d..c5ba20409 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -20,4 +20,3 @@ openshift_master_valid_grant_methods:  - deny  l_is_ha: "{{ openshift.master.ha is defined and openshift.master.ha | bool }}" -l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}" diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 3ae5c7600..abe139418 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -1,36 +1,16 @@  --- +- name: Load lib_openshift modules +  include_role: +    name: lib_openshift +  - name: Pre-pull node system container image    command: >      atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout" -- name: Check Node system container package -  command: > -    atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-node -  register: result - -- name: Update Node system container package -  command: > -    atomic containers update {{ openshift.common.service_type }}-node -  register: update_result -  changed_when: "'Extracting' in update_result.stdout" -  when: -  - l_is_same_version -  - ("node" in result.stdout) - -- name: Uninstall Node system container package -  command: > -    atomic uninstall {{ openshift.common.service_type }}-node -  failed_when: False -  when: -  - not l_is_same_version -  - ("node" in result.stdout) - -- name: Install Node system container package -  command: > -    atomic install --system 
--name={{ openshift.common.service_type }}-node {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} -  register: install_node_result -  changed_when: "'Extracting' in pull_result.stdout" -  when: -  - not l_is_same_version or ("node" not in result.stdout) | bool +- name: Install or Update node system container +  oc_atomic_container: +    name: "{{ openshift.common.service_type }}-node" +    image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" +    state: latest diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 6114230d0..b76ce8797 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,36 +1,16 @@  --- +- name: Load lib_openshift modules +  include_role: +    name: lib_openshift +  - name: Pre-pull OpenVSwitch system container image    command: >      atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout" -- name: Check OpenvSwitch system container package -  command: > -    atomic containers list --no-trunc -a -f container=openvswitch -  register: result - -- name: Update OpenvSwitch system container package -  command: > -    atomic containers update openvswitch -  register: update_result -  changed_when: "'Extracting' in update_result.stdout" -  when: -  - l_is_same_version -  - ("openvswitch" in result.stdout) | bool - -- name: Uninstall OpenvSwitch system container package -  command: > -    atomic uninstall openvswitch -  failed_when: False -  when: -  - not l_is_same_version -  - ("openvswitch" in result.stdout) | bool - -- name: Install OpenvSwitch system container package -  command: > -    
atomic install --system --name=openvswitch {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} -  when: -  - not l_is_same_version or ("openvswitch" not in result.stdout) | bool -  notify: -  - restart docker +- name: Install or Update OpenVSwitch system container +  oc_atomic_container: +    name: openvswitch +    image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}" +    state: latest diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml deleted file mode 100644 index 0c2abf3b9..000000000 --- a/roles/openshift_node/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}"  | 
