diff options
Diffstat (limited to 'roles')
47 files changed, 2784 insertions, 147 deletions
| diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml index 87ed7fee7..f2ef4f161 100644 --- a/roles/cockpit-ui/tasks/main.yml +++ b/roles/cockpit-ui/tasks/main.yml @@ -63,10 +63,12 @@    changed_when: false    when: check_docker_registry_exists.rc == 0 +# TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX  - name: Deploy registry-console    command: >      {{ openshift.common.client_binary }} new-app --template=registry-console      {% if openshift_cockpit_deployer_prefix is defined  %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %} +    {% if openshift_cockpit_deployer_version is defined  %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}      -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"      -p REGISTRY_HOST="{{ docker_registry_route.stdout }}"      -p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}" diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 2ec62c37c..e0746d70d 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -1,5 +1,5 @@  --- -etcd_service: "{{ 'etcd' if not etcd_is_containerized | bool else 'etcd_container' }}" +etcd_service: "{{ 'etcd' if openshift.common.is_etcd_system_container | bool or not etcd_is_containerized | bool else 'etcd_container' }}"  etcd_client_port: 2379  etcd_peer_port: 2380  etcd_url_scheme: http diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 41f25be70..5f3ca461e 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -14,13 +14,17 @@    command: docker pull {{ openshift.etcd.etcd_image }}    register: pull_result    changed_when: "'Downloaded newer image' in pull_result.stdout" -  when: etcd_is_containerized | bool +  when: +  - etcd_is_containerized | bool +  - not openshift.common.is_etcd_system_container | bool  - name: Install etcd container service file    template:   
   dest: "/etc/systemd/system/etcd_container.service"      src: etcd.docker.service -  when: etcd_is_containerized | bool +  when: +  - etcd_is_containerized | bool +  - not openshift.common.is_etcd_system_container | bool  - name: Ensure etcd datadir exists when containerized    file: @@ -36,10 +40,22 @@      enabled: no      masked: yes      daemon_reload: yes -  when: etcd_is_containerized | bool +  when: +  - etcd_is_containerized | bool +  - not openshift.common.is_etcd_system_container | bool    register: task_result    failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" +- name: Install etcd container service file +  template: +    dest: "/etc/systemd/system/etcd_container.service" +    src: etcd.docker.service +  when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool + +- name: Install Etcd system container +  include: system_container.yml +  when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool +  - name: Validate permissions on the config dir    file:      path: "{{ etcd_conf_dir }}" @@ -54,7 +70,7 @@      dest: /etc/etcd/etcd.conf      backup: true    notify: -    - restart etcd +  - restart etcd  - name: Enable etcd    systemd: diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml new file mode 100644 index 000000000..241180e2c --- /dev/null +++ b/roles/etcd/tasks/system_container.yml @@ -0,0 +1,63 @@ +--- +- name: Pull etcd system container +  command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }} +  register: pull_result +  changed_when: "'Pulling layer' in pull_result.stdout" + +- name: Check etcd system container package +  command: > +    atomic containers list --no-trunc -a -f container=etcd +  register: result + +- name: Set initial Etcd cluster +  set_fact: +    etcd_initial_cluster: > +      {% for host in etcd_peers | default([]) -%} +      {% if loop.last -%} +      {{ 
hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }} +      {%- else -%} +      {{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}, +      {%- endif -%} +      {% endfor -%} + +- name: Update Etcd system container package +  command: > +    atomic containers update +    --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} +    --set ETCD_NAME={{ etcd_hostname }} +    --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }} +    --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} +    --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} +    --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} +    --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} +    --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} +    --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +    --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt +    --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key +    --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +    --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt +    --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key +    etcd +  when: +  - ("etcd" in result.stdout) + +- name: Install Etcd system container package +  command: > +    atomic install --system --name=etcd +    --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }} +    --set ETCD_NAME={{ etcd_hostname }} +    --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }} +    --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }} +    --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }} +    --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }} +    --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }} +    
--set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }} +    --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +    --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt +    --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key +    --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt +    --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt +    --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key +    {{ openshift.etcd.etcd_image }} +  when: +  - ("etcd" not in result.stdout) diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index 93633e3e6..2eb9af921 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -1,6 +1,7 @@  ---  # etcd server vars -etcd_conf_dir: /etc/etcd +etcd_conf_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container else '/var/lib/etcd/etcd.etcd/etc'  }}" +etcd_system_container_conf_dir: /var/lib/etcd/etc  etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"  etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"  etcd_key_file: "{{ etcd_conf_dir }}/server.key" diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py new file mode 100644 index 000000000..702cb02d4 --- /dev/null +++ b/roles/lib_openshift/library/oc_process.py @@ -0,0 +1,1493 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +import atexit +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +import ruamel.yaml as yaml +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/process -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_process +short_description: Module to process openshift templates +description: +  - Process openshift templates programmatically. +options: +  state: +    description: +    - State has a few different meanings when it comes to process. +    - state: present - This state runs an `oc process <template>`.  
When used in +    - conjunction with 'create: True' the process will be piped to | oc create -f +    - state: absent - will remove a template +    - state: list - will perform an `oc get template <template_name>` +    default: present +    choices: ["present", "absent", "list"] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  template_name: +    description: +    - Name of the openshift template that is being processed. +    required: false +    default: None +    aliases: [] +  namespace: +    description: +    - The namespace where the template lives. +    required: false +    default: default +    aliases: [] +  content: +    description: +    - Template content that will be processed. +    required: false +    default: None +    aliases: [] +  params: +    description: +    - A list of parameters that will be inserted into the template. +    required: false +    default: None +    aliases: [] +  create: +    description: +    - Whether or not to create the template after being processed. e.g.  oc process | oc create -f - +    required: False +    default: False +    aliases: [] +  reconcile: +    description: +    - Whether or not to attempt to determine if there are updates or changes in the incoming template. 
+    default: true +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: process the cloud volume provisioner template with variables +  oc_process: +    namespace: openshift-infra +    template_name: online-volume-provisioner +    create: True +    params: +      PLAT: rhel7 +  register: processout +  run_once: true +- debug: var=processout +''' + +# -*- -*- -*- End included fragment: doc/process -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- +# noqa: E301,E302 + + +class YeditException(Exception): +    ''' Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object): +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @separator.setter +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' 
+        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key % ''.join(common_separators), key) + +    @staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +        ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a 
dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    return None + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = 
data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # pylint: disable=no-member +        if hasattr(self.yaml_dict, 'fa'): +            self.yaml_dict.fa.set_block_style() + +        Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + 
+        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader) +                # pylint: disable=no-member +                if hasattr(self.yaml_dict, 'fa'): +                    self.yaml_dict.fa.set_block_style() +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. %s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # pylint: disable=no-member,maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # pylint: disable=no-member,maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +     
   except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # pylint: disable=no-member,maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # 
pylint: disable=no-member,maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # pylint: disable=no-member,maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                  default_flow_style=False), +                             yaml.RoundTripLoader) +        # pylint: disable=no-member +        if hasattr(self.yaml_dict, 'fa'): +            tmp_copy.fa.set_block_style() +        result = 
Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False),  # noqa: E501 +                                 yaml.RoundTripLoader) +            # pylint: disable=no-member +            if hasattr(self.yaml_dict, 'fa'): +                tmp_copy.fa.set_block_style() +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. 
str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' + +                               'file exists, that it is has correct' + +                               ' permissions, and is valid yaml.'} + +        if module.params['state'] == 'list': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['key']: +                rval = yamlfile.get(module.params['key']) or {} + +            return {'changed': False, 'result': rval, 'state': "list"} + +        elif module.params['state'] == 'absent': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['update']: +                rval = yamlfile.pop(module.params['key'], +                                    module.params['value']) +            else: +                rval = yamlfile.delete(module.params['key']) + +            if rval[0] and module.params['src']: +                yamlfile.write() + +            return {'changed': rval[0], 'result': rval[1], 'state': "absent"} + +        elif module.params['state'] == 'present': +            # check if content is different than what is in the file +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) + +                # We had no edits to make and the contents are the same +                if yamlfile.yaml_dict == content and \ +                   module.params['value'] is None: +                    return {'changed': False, +                            'result': yamlfile.yaml_dict, +                            'state': "present"} + +                
yamlfile.yaml_dict = content + +            # we were passed a value; parse it +            if module.params['value']: +                value = Yedit.parse_value(module.params['value'], +                                          module.params['value_type']) +                key = module.params['key'] +                if module.params['update']: +                    # pylint: disable=line-too-long +                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501 +                                                      module.params['curr_value_format'])  # noqa: E501 + +                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501 + +                elif module.params['append']: +                    rval = yamlfile.append(key, value) +                else: +                    rval = yamlfile.put(key, value) + +                if rval[0] and module.params['src']: +                    yamlfile.write() + +                return {'changed': rval[0], +                        'result': rval[1], 'state': "present"} + +            # no edits to make +            if module.params['src']: +                # pylint: disable=redefined-variable-type +                rval = yamlfile.write() +                return {'changed': rval[0], +                        'result': rval[1], +                        'state': "present"} + +        return {'failed': True, 'msg': 'Unkown state passed'} + +# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 
kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces + +    # Pylint allows only 5 arguments to be passed. +    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, 
rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are returned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm manage-node schedulable ''' +        cmd = 
['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd 
= ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. ''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout, stderr + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [] +        if oadm: +            cmds = ['oadm'] +        else: +            cmds = ['oc'] + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        cmds.extend(cmd) + +        rval = {} +        results = '' +        err = None + +        if self.verbose: +            print(' '.join(cmds)) + +        returncode, stdout, stderr = self._run(cmds, input_data) + +        rval = {"returncode": returncode, +                "results": results, +                "cmd": ' '.join(cmds)} + +        if returncode == 0: +            if output: +                if output_type == 'json': +                    try: +                        rval['results'] = json.loads(stdout) +    
                except ValueError as err: +                        if "No JSON object could be decoded" in err.args: +                            err = err.args +                elif output_type == 'raw': +                    rval['results'] = stdout + +            if self.verbose: +                print("STDOUT: {0}".format(stdout)) +                print("STDERR: {0}".format(stderr)) + +            if err: +                rval.update({"err": err, +                             "stderr": stderr, +                             "stdout": stdout, +                             "cmd": cmds}) + +        else: +            rval.update({"stderr": stderr, +                         "stdout": stdout, +                         "results": {}}) + +        return rval + + +class Utils(object): +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def 
create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            
contents = yaml.load(contents, yaml.RoundTripLoader) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  
This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  ''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], 
skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(value) +                            print(user_def[key]) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(api_values) +                        print(user_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; user_def does not have key") +                        print(key) +                        print(value) +           
             if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key, data in self.config_options.items(): +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_process.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class OCProcess(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    # pylint allows 5. 
we need 6 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 namespace, +                 tname=None, +                 params=None, +                 create=False, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 tdata=None, +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(OCProcess, self).__init__(namespace, kubeconfig) +        self.namespace = namespace +        self.name = tname +        self.data = tdata +        self.params = params +        self.create = create +        self.kubeconfig = kubeconfig +        self.verbose = verbose +        self._template = None + +    @property +    def template(self): +        '''template property''' +        if self._template is None: +            results = self._process(self.name, False, self.params, self.data) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Error processing template [%s].' % self.name) +            self._template = results['results']['items'] + +        return self._template + +    def get(self): +        '''get the template''' +        results = self._get('template', self.name) +        if results['returncode'] != 0: +            # Does the template exist?? 
+            if 'not found' in results['stderr']: +                results['returncode'] = 0 +                results['exists'] = False +                results['results'] = [] + +        return results + +    def delete(self, obj): +        '''delete a resource''' +        return self._delete(obj['kind'], obj['metadata']['name']) + +    def create_obj(self, obj): +        '''create a resource''' +        return self._create_from_content(obj['metadata']['name'], obj) + +    def process(self, create=None): +        '''process a template''' +        do_create = False +        if create != None: +            do_create = create +        else: +            do_create = self.create + +        return self._process(self.name, do_create, self.params, self.data) + +    def exists(self): +        '''return whether the template exists''' +        # Always return true if we're being passed template data +        if self.data: +            return True +        t_results = self._get('template', self.name) + +        if t_results['returncode'] != 0: +            # Does the template exist?? +            if 'not found' in t_results['stderr']: +                return False +            else: +                raise OpenShiftCLIError('Something went wrong. 
%s' % t_results) + +        return True + +    def needs_update(self): +        '''attempt to process the template and return it for comparison with oc objects''' +        obj_results = [] +        for obj in self.template: + +            # build a list of types to skip +            skip = [] + +            if obj['kind'] == 'ServiceAccount': +                skip.extend(['secrets', 'imagePullSecrets']) +            if obj['kind'] == 'BuildConfig': +                skip.extend(['lastTriggeredImageID']) +            if obj['kind'] == 'ImageStream': +                skip.extend(['generation']) +            if obj['kind'] == 'DeploymentConfig': +                skip.extend(['lastTriggeredImage']) + +            # fetch the current object +            curr_obj_results = self._get(obj['kind'], obj['metadata']['name']) +            if curr_obj_results['returncode'] != 0: +                # Does the template exist?? +                if 'not found' in curr_obj_results['stderr']: +                    obj_results.append((obj, True)) +                    continue + +            # check the generated object against the existing object +            if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip): +                obj_results.append((obj, True)) +                continue + +            obj_results.append((obj, False)) + +        return obj_results + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode): +        '''run the ansible idempotent code''' + +        ocprocess = OCProcess(params['namespace'], +                              params['template_name'], +                              params['params'], +                              params['create'], +                              kubeconfig=params['kubeconfig'], +                              tdata=params['content'], +                              verbose=params['debug']) + +        state = params['state'] + +        api_rval = 
ocprocess.get() + +        if state == 'list': +            if api_rval['returncode'] != 0: +                return {"failed": True, "msg" : api_rval} + +            return {"changed" : False, "results": api_rval, "state": "list"} + +        elif state == 'present': +            if check_mode and params['create']: +                return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."} + +            if not ocprocess.exists() or not params['reconcile']: +            #FIXME: this code will never get run in a way that succeeds when +            #       module.params['reconcile'] is true. Because oc_process doesn't +            #       create the actual template, the check of ocprocess.exists() +            #       is meaningless. Either it's already here and this code +            #       won't be run, or this code will fail because there is no +            #       template available for oc process to use. Have we conflated +            #       the template's existence with the existence of the objects +            #       it describes? 
+ +            # Create it here +                api_rval = ocprocess.process() +                if api_rval['returncode'] != 0: +                    return {"failed": True, "msg": api_rval} + +                if params['create']: +                    return {"changed": True, "results": api_rval, "state": "present"} + +                return {"changed": False, "results": api_rval, "state": "present"} + +        # verify results +        update = False +        rval = [] +        all_results = ocprocess.needs_update() +        for obj, status in all_results: +            if status: +                ocprocess.delete(obj) +                results = ocprocess.create_obj(obj) +                results['kind'] = obj['kind'] +                rval.append(results) +                update = True + +        if not update: +            return {"changed": update, "results": api_rval, "state": "present"} + +        for cmd in rval: +            if cmd['returncode'] != 0: +                return {"failed": True, "changed": update, "results": rval, "state": "present"} + +        return {"changed": update, "results": rval, "state": "present"} + + +# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_process.py -*- -*- -*- + + +def main(): +    ''' +    ansible oc module for processing templates +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', choices=['present', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            template_name=dict(default=None, type='str'), +            content=dict(default=None, type='str'), +            params=dict(default=None, type='dict'), +            create=dict(default=False, type='bool'), +            reconcile=dict(default=True, type='bool'), + 
       ), +        supports_check_mode=True, +    ) + +    rval = OCProcess.run_ansible(module.params, module.check_mode) +    if 'failed' in rval: +        module.fail_json(**rval) + +    module.exit_json(**rval) + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_process.py -*- -*- -*- diff --git a/roles/lib_openshift/src/ansible/oc_process.py b/roles/lib_openshift/src/ansible/oc_process.py new file mode 100644 index 000000000..17cf865b7 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_process.py @@ -0,0 +1,32 @@ +# pylint: skip-file +# flake8: noqa + + +def main(): +    ''' +    ansible oc module for processing templates +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', choices=['present', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            template_name=dict(default=None, type='str'), +            content=dict(default=None, type='str'), +            params=dict(default=None, type='dict'), +            create=dict(default=False, type='bool'), +            reconcile=dict(default=True, type='bool'), +        ), +        supports_check_mode=True, +    ) + +    rval = OCProcess.run_ansible(module.params, module.check_mode) +    if 'failed' in rval: +        module.fail_json(**rval) + +    module.exit_json(**rval) + +if __name__ == '__main__': +    main() diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py new file mode 100644 index 000000000..80d81448d --- /dev/null +++ b/roles/lib_openshift/src/class/oc_process.py @@ -0,0 +1,188 @@ +# pylint: skip-file +# flake8: noqa + + +# pylint: disable=too-many-instance-attributes +class OCProcess(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    # pylint allows 
5. we need 6 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 namespace, +                 tname=None, +                 params=None, +                 create=False, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 tdata=None, +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(OCProcess, self).__init__(namespace, kubeconfig) +        self.namespace = namespace +        self.name = tname +        self.data = tdata +        self.params = params +        self.create = create +        self.kubeconfig = kubeconfig +        self.verbose = verbose +        self._template = None + +    @property +    def template(self): +        '''template property''' +        if self._template is None: +            results = self._process(self.name, False, self.params, self.data) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Error processing template [%s].' % self.name) +            self._template = results['results']['items'] + +        return self._template + +    def get(self): +        '''get the template''' +        results = self._get('template', self.name) +        if results['returncode'] != 0: +            # Does the template exist?? 
+            if 'not found' in results['stderr']: +                results['returncode'] = 0 +                results['exists'] = False +                results['results'] = [] + +        return results + +    def delete(self, obj): +        '''delete a resource''' +        return self._delete(obj['kind'], obj['metadata']['name']) + +    def create_obj(self, obj): +        '''create a resource''' +        return self._create_from_content(obj['metadata']['name'], obj) + +    def process(self, create=None): +        '''process a template''' +        do_create = False +        if create != None: +            do_create = create +        else: +            do_create = self.create + +        return self._process(self.name, do_create, self.params, self.data) + +    def exists(self): +        '''return whether the template exists''' +        # Always return true if we're being passed template data +        if self.data: +            return True +        t_results = self._get('template', self.name) + +        if t_results['returncode'] != 0: +            # Does the template exist?? +            if 'not found' in t_results['stderr']: +                return False +            else: +                raise OpenShiftCLIError('Something went wrong. 
%s' % t_results) + +        return True + +    def needs_update(self): +        '''attempt to process the template and return it for comparison with oc objects''' +        obj_results = [] +        for obj in self.template: + +            # build a list of types to skip +            skip = [] + +            if obj['kind'] == 'ServiceAccount': +                skip.extend(['secrets', 'imagePullSecrets']) +            if obj['kind'] == 'BuildConfig': +                skip.extend(['lastTriggeredImageID']) +            if obj['kind'] == 'ImageStream': +                skip.extend(['generation']) +            if obj['kind'] == 'DeploymentConfig': +                skip.extend(['lastTriggeredImage']) + +            # fetch the current object +            curr_obj_results = self._get(obj['kind'], obj['metadata']['name']) +            if curr_obj_results['returncode'] != 0: +                # Does the template exist?? +                if 'not found' in curr_obj_results['stderr']: +                    obj_results.append((obj, True)) +                    continue + +            # check the generated object against the existing object +            if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip): +                obj_results.append((obj, True)) +                continue + +            obj_results.append((obj, False)) + +        return obj_results + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode): +        '''run the ansible idempotent code''' + +        ocprocess = OCProcess(params['namespace'], +                              params['template_name'], +                              params['params'], +                              params['create'], +                              kubeconfig=params['kubeconfig'], +                              tdata=params['content'], +                              verbose=params['debug']) + +        state = params['state'] + +        api_rval = 
ocprocess.get() + +        if state == 'list': +            if api_rval['returncode'] != 0: +                return {"failed": True, "msg" : api_rval} + +            return {"changed" : False, "results": api_rval, "state": "list"} + +        elif state == 'present': +            if check_mode and params['create']: +                return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."} + +            if not ocprocess.exists() or not params['reconcile']: +            #FIXME: this code will never get run in a way that succeeds when +            #       module.params['reconcile'] is true. Because oc_process doesn't +            #       create the actual template, the check of ocprocess.exists() +            #       is meaningless. Either it's already here and this code +            #       won't be run, or this code will fail because there is no +            #       template available for oc process to use. Have we conflated +            #       the template's existence with the existence of the objects +            #       it describes? 
+ +            # Create it here +                api_rval = ocprocess.process() +                if api_rval['returncode'] != 0: +                    return {"failed": True, "msg": api_rval} + +                if params['create']: +                    return {"changed": True, "results": api_rval, "state": "present"} + +                return {"changed": False, "results": api_rval, "state": "present"} + +        # verify results +        update = False +        rval = [] +        all_results = ocprocess.needs_update() +        for obj, status in all_results: +            if status: +                ocprocess.delete(obj) +                results = ocprocess.create_obj(obj) +                results['kind'] = obj['kind'] +                rval.append(results) +                update = True + +        if not update: +            return {"changed": update, "results": api_rval, "state": "present"} + +        for cmd in rval: +            if cmd['returncode'] != 0: +                return {"failed": True, "changed": update, "results": rval, "state": "present"} + +        return {"changed": update, "results": rval, "state": "present"} + diff --git a/roles/lib_openshift/src/doc/process b/roles/lib_openshift/src/doc/process new file mode 100644 index 000000000..86a854c07 --- /dev/null +++ b/roles/lib_openshift/src/doc/process @@ -0,0 +1,84 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_process +short_description: Module to process openshift templates +description: +  - Process openshift templates programmatically. +options: +  state: +    description: +    - State has a few different meanings when it comes to process. +    - state: present - This state runs an `oc process <template>`.  
When used in +    - conjunction with 'create: True' the process will be piped to | oc create -f +    - state: absent - will remove a template +    - state: list - will perform an `oc get template <template_name>` +    default: present +    choices: ["present", "absent", "list"] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  template_name: +    description: +    - Name of the openshift template that is being processed. +    required: false +    default: None +    aliases: [] +  namespace: +    description: +    - The namespace where the template lives. +    required: false +    default: default +    aliases: [] +  content: +    description: +    - Template content that will be processed. +    required: false +    default: None +    aliases: [] +  params: +    description: +    - A list of parameters that will be inserted into the template. +    required: false +    default: None +    aliases: [] +  create: +    description: +    - Whether or not to create the template after being processed. e.g.  oc process | oc create -f - +    required: False +    default: False +    aliases: [] +  reconcile: +    description: +    - Whether or not to attempt to determine if there are updates or changes in the incoming template. 
+    default: true +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: process the cloud volume provisioner template with variables +  oc_process: +    namespace: openshift-infra +    template_name: online-volume-provisioner +    create: True +    params: +      PLAT: rhel7 +  register: processout +  run_once: true +- debug: var=processout +''' diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml index aa02ce120..e9056655d 100644 --- a/roles/lib_openshift/src/sources.yml +++ b/roles/lib_openshift/src/sources.yml @@ -39,6 +39,16 @@ oc_obj.py:  - class/oc_obj.py  - ansible/oc_obj.py +oc_process.py: +- doc/generated +- doc/license +- lib/import.py +- doc/process +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- class/oc_process.py +- ansible/oc_process.py +  oc_route.py:  - doc/generated  - doc/license diff --git a/roles/lib_openshift/src/test/integration/oc_process.yml b/roles/lib_openshift/src/test/integration/oc_process.yml new file mode 100755 index 000000000..7ea4c6b99 --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_process.yml @@ -0,0 +1,83 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/:../../../../lib_utils/library + +--- +- hosts: "{{ cli_master_test }}" +  gather_facts: no +  user: root +  vars: +    template_name: mysql-ephemeral +    ns_name: test + +  post_tasks: +  - name: get the mysql-ephemeral template +    oc_obj: +      name: mysql-ephemeral +      state: list +      namespace: openshift +      kind: template +    register: mysqltempl + +  - name: fix namespace +    yedit: +      src: /tmp/mysql-template +      key: metadata.namespace +      value: test +      backup: false +      content: "{{ mysqltempl.results.results[0] | to_yaml }}" + +  - name: create the test namespace +    oc_obj: +      name: test +      state: present +      namespace: test +      kind: namespace +      content: +        
path: /tmp/ns_test +        data: +          apiVersion: v1 +          kind: Namespace +          metadata: +            name: test +          spec: +            finalizers: +            - openshift.io/origin +            - kubernetes +    register: mysqltempl + +  - name: create the mysql-ephemeral template +    oc_obj: +      name: mysql-ephemeral +      state: present +      namespace: test +      kind: template +      files: +      - /tmp/mysql-template +      delete_after: True +    register: mysqltempl + +  - name: process mysql-ephemeral +    oc_process: +      template_name: mysql-ephemeral +      namespace: test +      params: +        NAMESPACE: test +        DATABASE_SERVICE_NAME: testdb +      create: False +      reconcile: false +    register: procout + +  - assert: +      that: +      - not procout.changed +      - procout.results.results['items'][0]['metadata']['name'] == 'testdb' +      - procout.results.results['items'][0]['kind'] == 'Service' +      - procout.results.results['items'][1]['metadata']['name'] == 'testdb' +      - procout.results.results['items'][1]['kind'] == 'DeploymentConfig' +      msg: process failed on template + +  - name: remove namespace test +    oc_obj: +      kind: namespace +      name: test +      namespace: test +      state: absent diff --git a/roles/lib_openshift/src/test/unit/oc_process.py b/roles/lib_openshift/src/test/unit/oc_process.py new file mode 100755 index 000000000..450ff7071 --- /dev/null +++ b/roles/lib_openshift/src/test/unit/oc_process.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python2 +''' + Unit tests for oc process +''' +# To run +# python -m unittest version +# +# . 
+# Ran 1 test in 0.597s +# +# OK + +import os +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_process import OCProcess  # noqa: E402 + + +# pylint: disable=too-many-public-methods +class OCProcessTest(unittest.TestCase): +    ''' +     Test class for OCProcess +    ''' +    mysql = '''{ +    "kind": "Template", +    "apiVersion": "v1", +    "metadata": { +        "name": "mysql-ephemeral", +        "namespace": "openshift", +        "selfLink": "/oapi/v1/namespaces/openshift/templates/mysql-ephemeral", +        "uid": "fb8b5f04-e3d3-11e6-a982-0e84250fc302", +        "resourceVersion": "480", +        "creationTimestamp": "2017-01-26T14:30:27Z", +        "annotations": { +            "iconClass": "icon-mysql-database", +            "openshift.io/display-name": "MySQL (Ephemeral)", +            "tags": "database,mysql" +        } +    }, +    "objects": [ +        { +            "apiVersion": "v1", +            "kind": "Service", +            "metadata": { +                "creationTimestamp": null, +                "name": "${DATABASE_SERVICE_NAME}" +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "mysql", +                        "nodePort": 0, +                        "port": 3306, +                        "protocol": "TCP", +                        "targetPort": 3306 +                    } +                ], +                "selector": { +                    "name": "${DATABASE_SERVICE_NAME}" +                }, +                "sessionAffinity": "None", +                "type": 
"ClusterIP" +            }, +            "status": { +                "loadBalancer": {} +            } +        }, +        { +            "apiVersion": "v1", +            "kind": "DeploymentConfig", +            "metadata": { +                "creationTimestamp": null, +                "name": "${DATABASE_SERVICE_NAME}" +            }, +            "spec": { +                "replicas": 1, +                "selector": { +                    "name": "${DATABASE_SERVICE_NAME}" +                }, +                "strategy": { +                    "type": "Recreate" +                }, +                "template": { +                    "metadata": { +                        "creationTimestamp": null, +                        "labels": { +                            "name": "${DATABASE_SERVICE_NAME}" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "capabilities": {}, +                                "env": [ +                                    { +                                        "name": "MYSQL_USER", +                                        "value": "${MYSQL_USER}" +                                    }, +                                    { +                                        "name": "MYSQL_PASSWORD", +                                        "value": "${MYSQL_PASSWORD}" +                                    }, +                                    { +                                        "name": "MYSQL_DATABASE", +                                        "value": "${MYSQL_DATABASE}" +                                    } +                                ], +                                "image": " ", +                                "imagePullPolicy": "IfNotPresent", +                                "livenessProbe": { +                                    "initialDelaySeconds": 30, +                                
    "tcpSocket": { +                                        "port": 3306 +                                    }, +                                    "timeoutSeconds": 1 +                                }, +                                "name": "mysql", +                                "ports": [ +                                    { +                                        "containerPort": 3306, +                                        "protocol": "TCP" +                                    } +                                ], +                                "readinessProbe": { +                                    "exec": { +                                        "command": [ +                                            "/bin/sh", +                                            "-i", +                                            "-c", +                                            "MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'" +                                        ] +                                    }, +                                    "initialDelaySeconds": 5, +                                    "timeoutSeconds": 1 +                                }, +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_LIMIT}" +                                    } +                                }, +                                "securityContext": { +                                    "capabilities": {}, +                                    "privileged": false +                                }, +                                "terminationMessagePath": "/dev/termination-log", +                                "volumeMounts": [ +                                    { +                                        "mountPath": "/var/lib/mysql/data", +                                        "name": 
"${DATABASE_SERVICE_NAME}-data" +                                    } +                                ] +                            } +                        ], +                        "dnsPolicy": "ClusterFirst", +                        "restartPolicy": "Always", +                        "volumes": [ +                            { +                                "emptyDir": { +                                    "medium": "" +                                }, +                                "name": "${DATABASE_SERVICE_NAME}-data" +                            } +                        ] +                    } +                }, +                "triggers": [ +                    { +                        "imageChangeParams": { +                            "automatic": true, +                            "containerNames": [ +                                "mysql" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "mysql:${MYSQL_VERSION}", +                                "namespace": "${NAMESPACE}" +                            }, +                            "lastTriggeredImage": "" +                        }, +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ] +            }, +            "status": {} +        } +    ], +    "parameters": [ +        { +            "name": "MEMORY_LIMIT", +            "displayName": "Memory Limit", +            "description": "Maximum amount of memory the container can use.", +            "value": "512Mi" +        }, +        { +            "name": "NAMESPACE", +            "displayName": "Namespace", +            "description": "The OpenShift Namespace where the ImageStream resides.", +            "value": "openshift" +        }, +        { +            "name": 
"DATABASE_SERVICE_NAME", +            "displayName": "Database Service Name", +            "description": "The name of the OpenShift Service exposed for the database.", +            "value": "mysql", +            "required": true +        }, +        { +            "name": "MYSQL_USER", +            "displayName": "MySQL Connection Username", +            "description": "Username for MySQL user that will be used for accessing the database.", +            "generate": "expression", +            "from": "user[A-Z0-9]{3}", +            "required": true +        }, +        { +            "name": "MYSQL_PASSWORD", +            "displayName": "MySQL Connection Password", +            "description": "Password for the MySQL connection user.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{16}", +            "required": true +        }, +        { +            "name": "MYSQL_DATABASE", +            "displayName": "MySQL Database Name", +            "description": "Name of the MySQL database accessed.", +            "value": "sampledb", +            "required": true +        }, +        { +            "name": "MYSQL_VERSION", +            "displayName": "Version of MySQL Image", +            "description": "Version of MySQL image to be used (5.5, 5.6 or latest).", +            "value": "5.6", +            "required": true +        } +    ], +    "labels": { +        "template": "mysql-ephemeral-template" +    } +}''' + +    def setUp(self): +        ''' setup method will set to known configuration ''' +        pass + +    @mock.patch('oc_process.Utils.create_tmpfile_copy') +    @mock.patch('oc_process.OCProcess._run') +    def test_state_list(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing a get ''' +        params = {'template_name': 'mysql-ephermeral', +                  'namespace': 'test', +                  'content': None, +                  'state': 'list', +                  'reconcile': False, +                  'create': False, +  
                'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'}, +                  'kubeconfig': '/etc/origin/master/admin.kubeconfig', +                  'debug': False} + +        mock_cmd.side_effect = [ +            (0, OCProcessTest.mysql, '') +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mock_kubeconfig', +        ] + +        results = OCProcess.run_ansible(params, False) + +        self.assertFalse(results['changed']) +        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mysql-ephemeral') + +    @mock.patch('oc_process.Utils.create_tmpfile_copy') +    @mock.patch('oc_process.OCProcess._run') +    def test_process_no_create(self, mock_cmd, mock_tmpfile_copy): +        ''' Testing a process with no create ''' +        params = {'template_name': 'mysql-ephermeral', +                  'namespace': 'test', +                  'content': None, +                  'state': 'present', +                  'reconcile': False, +                  'create': False, +                  'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'}, +                  'kubeconfig': '/etc/origin/master/admin.kubeconfig', +                  'debug': False} + +        mysqlproc = '''{ +    "kind": "List", +    "apiVersion": "v1", +    "metadata": {}, +    "items": [ +        { +            "apiVersion": "v1", +            "kind": "Service", +            "metadata": { +                "creationTimestamp": null, +                "labels": { +                    "template": "mysql-ephemeral-template" +                }, +                "name": "testdb" +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "mysql", +                        "nodePort": 0, +                        "port": 3306, +                        "protocol": "TCP", +                        "targetPort": 3306 +                    } +                ], +               
 "selector": { +                    "name": "testdb" +                }, +                "sessionAffinity": "None", +                "type": "ClusterIP" +            }, +            "status": { +                "loadBalancer": {} +            } +        }, +        { +            "apiVersion": "v1", +            "kind": "DeploymentConfig", +            "metadata": { +                "creationTimestamp": null, +                "labels": { +                    "template": "mysql-ephemeral-template" +                }, +                "name": "testdb" +            }, +            "spec": { +                "replicas": 1, +                "selector": { +                    "name": "testdb" +                }, +                "strategy": { +                    "type": "Recreate" +                }, +                "template": { +                    "metadata": { +                        "creationTimestamp": null, +                        "labels": { +                            "name": "testdb" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "capabilities": {}, +                                "env": [ +                                    { +                                        "name": "MYSQL_USER", +                                        "value": "userHJJ" +                                    }, +                                    { +                                        "name": "MYSQL_PASSWORD", +                                        "value": "GITOAduAMaV6k688" +                                    }, +                                    { +                                        "name": "MYSQL_DATABASE", +                                        "value": "sampledb" +                                    } +                                ], +                                "image": " ", +                                
"imagePullPolicy": "IfNotPresent", +                                "livenessProbe": { +                                    "initialDelaySeconds": 30, +                                    "tcpSocket": { +                                        "port": 3306 +                                    }, +                                    "timeoutSeconds": 1 +                                }, +                                "name": "mysql", +                                "ports": [ +                                    { +                                        "containerPort": 3306, +                                        "protocol": "TCP" +                                    } +                                ], +                                "readinessProbe": { +                                    "exec": { +                                        "command": [ +                                            "/bin/sh", +                                            "-i", +                                            "-c", +                                            "MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'" +                                        ] +                                    }, +                                    "initialDelaySeconds": 5, +                                    "timeoutSeconds": 1 +                                }, +                                "resources": { +                                    "limits": { +                                        "memory": "512Mi" +                                    } +                                }, +                                "securityContext": { +                                    "capabilities": {}, +                                    "privileged": false +                                }, +                                "terminationMessagePath": "/dev/termination-log", +                                "volumeMounts": [ +                  
                  { +                                        "mountPath": "/var/lib/mysql/data", +                                        "name": "testdb-data" +                                    } +                                ] +                            } +                        ], +                        "dnsPolicy": "ClusterFirst", +                        "restartPolicy": "Always", +                        "volumes": [ +                            { +                                "emptyDir": { +                                    "medium": "" +                                }, +                                "name": "testdb-data" +                            } +                        ] +                    } +                }, +                "triggers": [ +                    { +                        "imageChangeParams": { +                            "automatic": true, +                            "containerNames": [ +                                "mysql" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "mysql:5.6", +                                "namespace": "test" +                            }, +                            "lastTriggeredImage": "" +                        }, +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ] +            } +        } +    ] +}''' + +        mock_cmd.side_effect = [ +            (0, OCProcessTest.mysql, ''), +            (0, OCProcessTest.mysql, ''), +            (0, mysqlproc, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mock_kubeconfig', +        ] + +        results = OCProcess.run_ansible(params, False) + +        self.assertFalse(results['changed']) +        
self.assertEqual(results['results']['results']['items'][0]['metadata']['name'], 'testdb') + +    def tearDown(self): +        '''TearDown method''' +        pass + + +if __name__ == "__main__": +    unittest.main() diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index cae15d61a..82db36eba 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -5,6 +5,6 @@ etcd_hostname: "{{ openshift.common.hostname }}"  etcd_ip: "{{ openshift.common.ip }}"  etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"  etcd_cert_prefix: -etcd_cert_config_dir: /etc/etcd +etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}"  etcd_peer_url_scheme: https  etcd_url_scheme: https diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml new file mode 100644 index 000000000..28b388560 --- /dev/null +++ b/roles/openshift_facts/defaults/main.yml @@ -0,0 +1,2 @@ +--- +use_system_containers: false diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index ec2942b69..7a0642cce 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1785,11 +1785,14 @@ def set_container_facts_if_unset(facts):          facts['etcd']['etcd_image'] = etcd_image      if 'master' in facts and 'master_image' not in facts['master']:          facts['master']['master_image'] = master_image +        facts['master']['master_system_image'] = master_image      if 'node' in facts:          if 'node_image' not in facts['node']:              facts['node']['node_image'] = node_image +            facts['node']['node_system_image'] = node_image          if 'ovs_image' not in facts['node']:              facts['node']['ovs_image'] = ovs_image +            facts['node']['ovs_system_image'] = ovs_image   
   if safe_get_bool(facts['common']['is_containerized']):          facts['common']['admin_binary'] = '/usr/local/bin/oadm' diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index b7b521f1a..9a1982076 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -9,6 +9,10 @@      l_is_atomic: "{{ ostree_booted.stat.exists }}"  - set_fact:      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" +    l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}" +    l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}" +    l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}" +    l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"  - name: Ensure various deps are installed    package: name={{ item }} state=present @@ -27,6 +31,11 @@        hostname: "{{ openshift_hostname | default(None) }}"        ip: "{{ openshift_ip | default(None) }}"        is_containerized: "{{ l_is_containerized | default(None) }}" +      is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}" +      is_node_system_container: "{{ l_is_node_system_container | default(false) }}" +      is_master_system_container: "{{ l_is_master_system_container | default(false) }}" +      is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}" +      system_images_registry: "{{ system_images_registry | default('') }}"        public_hostname: "{{ openshift_public_hostname | default(None) }}"        public_ip: "{{ openshift_public_ip | default(None) }}"        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" diff --git a/roles/openshift_logging/README.md 
b/roles/openshift_logging/README.md index 9394977c0..f7b2f7743 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -24,8 +24,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  - `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.  - `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.  - `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'. -- `master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'. -- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://localhost:8443'. +- `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'. +- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:8443'.  - `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.  - `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.  - `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'. @@ -51,8 +51,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.  
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.  - `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'. -- `openshift_logging_fluentd_use_journal`: Whether or not Fluentd should read log entries from Journal. Defaults to 'False'. NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver and may overwrite this value. -- `openshift_logging_fluentd_journal_read_from_head`: Whether or not Fluentd will try to read from the head of Journal when first starting up, using this may cause a delay in ES receiving current log records. Defaults to 'False'. +- `openshift_logging_fluentd_use_journal`: NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver when using the default of empty. +- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.  - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].  - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. 
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index ead59c029..dc1e66d55 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,9 +1,9 @@  --- -openshift_logging_image_prefix: docker.io/openshift/origin- -openshift_logging_image_version: latest +openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"  openshift_logging_use_ops: False -master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" -openshift_logging_master_public_url: "https://{{openshift.common.public_hostname}}:8443" +openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' ~ openshift.common.public_hostname ~ ':8443') }}"  openshift_logging_namespace: logging  openshift_logging_install_logging: True @@ -19,7 +19,7 @@ openshift_logging_curator_memory_limit: null  openshift_logging_curator_ops_cpu_limit: 100m  openshift_logging_curator_ops_memory_limit: null -openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}" +openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift.common.dns_domain) }}"  openshift_logging_kibana_cpu_limit: null  openshift_logging_kibana_memory_limit: null  openshift_logging_kibana_proxy_debug: false @@ -27,7 +27,19 @@ openshift_logging_kibana_proxy_cpu_limit: null  openshift_logging_kibana_proxy_memory_limit: null  openshift_logging_kibana_replica_count: 1 -openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}" +#The absolute path on the control node to the cert file to use +#for the public facing kibana certs +openshift_logging_kibana_cert: "" + +#The 
absolute path on the control node to the key file to use +#for the public facing kibana certs +openshift_logging_kibana_key: "" + +#The absolute path on the control node to the CA file to use +#for the public facing kibana certs +openshift_logging_kibana_ca: "" + +openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ openshift.common.dns_domain) }}"  openshift_logging_kibana_ops_cpu_limit: null  openshift_logging_kibana_ops_memory_limit: null  openshift_logging_kibana_ops_proxy_debug: false @@ -39,8 +51,8 @@ openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}  openshift_logging_fluentd_cpu_limit: 100m  openshift_logging_fluentd_memory_limit: 512Mi  openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_use_journal: false -openshift_logging_fluentd_journal_read_from_head: false +openshift_logging_fluentd_use_journal: '' +openshift_logging_fluentd_journal_read_from_head: ''  openshift_logging_fluentd_hosts: ['--all']  openshift_logging_es_host: logging-es @@ -48,13 +60,13 @@ openshift_logging_es_port: 9200  openshift_logging_es_ca: /etc/fluent/keys/ca  openshift_logging_es_client_cert: /etc/fluent/keys/cert  openshift_logging_es_client_key: /etc/fluent/keys/key -openshift_logging_es_cluster_size: 1 +openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"  openshift_logging_es_cpu_limit: null  openshift_logging_es_memory_limit: 1024Mi  openshift_logging_es_pv_selector: null -openshift_logging_es_pvc_dynamic: False -openshift_logging_es_pvc_size: "" -openshift_logging_es_pvc_prefix: logging-es +openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" +openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"  
openshift_logging_es_recover_after_time: 5m  openshift_logging_es_storage_group: 65534 @@ -66,13 +78,13 @@ openshift_logging_es_ops_port: 9200  openshift_logging_es_ops_ca: /etc/fluent/keys/ca  openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert  openshift_logging_es_ops_client_key: /etc/fluent/keys/key -openshift_logging_es_ops_cluster_size: 1 +openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"  openshift_logging_es_ops_cpu_limit: null  openshift_logging_es_ops_memory_limit: 1024Mi  openshift_logging_es_ops_pv_selector: None -openshift_logging_es_ops_pvc_dynamic: False -openshift_logging_es_ops_pvc_size: "" -openshift_logging_es_ops_pvc_prefix: logging-es-ops +openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" +openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" +openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"  openshift_logging_es_ops_recover_after_time: 5m  openshift_logging_es_ops_storage_group: 65534 diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf index aa843e983..c0c1c8a44 100644 --- a/roles/openshift_logging/files/fluent.conf +++ b/roles/openshift_logging/files/fluent.conf @@ -22,6 +22,7 @@    @include configs.d/openshift/filter-k8s-flatten-hash.conf    @include configs.d/openshift/filter-k8s-record-transform.conf    @include configs.d/openshift/filter-syslog-record-transform.conf +  @include configs.d/openshift/filter-common-data-model.conf    @include configs.d/openshift/filter-post-*.conf  ## diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml index 601ec9e83..d782d621e 100644 --- a/roles/openshift_logging/tasks/generate_pvcs.yaml +++ 
b/roles/openshift_logging/tasks/generate_pvcs.yaml @@ -4,10 +4,10 @@    vars:      pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"      start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}" -  with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}} +  with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size|int - 1) | ternary(start, openshift_logging_es_cluster_size|int - 1)}}    when:      - openshift_logging_es_pvc_size | search('^\d.*') -    - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}" +    - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size|int }}"    check_mode: no  - name: Generating PersistentVolumeClaims diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index 60694f67e..3c462378b 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -1,4 +1,20 @@  --- +- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }} +  when: "{{ openshift_logging_kibana_key | trim | length > 0 }}" +  changed_when: false + +- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode  }} +  when: "{{openshift_logging_kibana_cert | trim | length > 0}}" +  changed_when: false + +- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode  }} +  when: "{{openshift_logging_kibana_ca | trim | length > 0}}" +  changed_when: false + +- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }} +  when: kibana_ca is not defined +  changed_when: false +  - name: Generating logging routes    template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml    tags: routes @@ -6,7 +22,9 @@     
 obj_name: "{{route_info.name}}"      route_host: "{{route_info.host}}"      service_name: "{{route_info.name}}" -    tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" +    tls_key: "{{kibana_key | default('') | b64decode}}" +    tls_cert: "{{kibana_cert | default('') | b64decode}}" +    tls_ca_cert: "{{kibana_ca | b64decode}}"      tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"      labels:        component: support diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index 1d6e55e44..f9c2c81fb 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -54,10 +54,10 @@    fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"    vars:      es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}" -    cluster_size: "{{openshift_logging_es_ops_cluster_size}}" +    cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"    when:      - openshift_logging_use_ops | bool -    - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}" +    - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"    check_mode: no  - name: Generate PersistentVolumeClaims for Ops @@ -66,7 +66,7 @@      es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"      es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"      openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}" -    openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}" +    openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"      openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"      openshift_logging_es_pvc_dynamic: 
"{{openshift_logging_es_ops_pvc_dynamic}}"      openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}" @@ -89,7 +89,7 @@      es_cluster_name: "{{component}}"      deploy_name_prefix: "logging-{{component}}"      deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" -    cluster_size: "{{openshift_logging_es_ops_cluster_size}}" +    cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"    with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}    when:      - openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index d52429f03..e23c3f9f1 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -9,11 +9,11 @@  - name: Validate Elasticsearch cluster size    fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this." -  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size +  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int  - name: Validate Elasticsearch Ops cluster size    fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this." 
-  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size +  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int  - name: Install logging    include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml" diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml index cceacd538..0dc31932c 100644 --- a/roles/openshift_logging/tasks/upgrade_logging.yaml +++ b/roles/openshift_logging/tasks/upgrade_logging.yaml @@ -17,7 +17,7 @@    oc_scale:      kind: dc      name: "{{object.split('/')[1]}}" -    namespace: "{{mktemp.stdout}}/admin.kubeconfig" +    namespace: "{{openshift_logging_namespace}}"      replicas: 1    with_items: "{{es_dc.stdout_lines}}"    loop_control: diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index de6258eaa..b7bc15b62 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -48,7 +48,7 @@ spec:            env:              -                name: "K8S_HOST_URL" -              value: "{{master_url}}" +              value: "{{openshift_logging_master_url}}"              -                name: "ES_HOST"                value: "{{es_host}}" diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index b6c91f8ed..223d342b9 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -61,7 +61,7 @@ spec:            readOnly: true          env:          - name: "K8S_HOST_URL" -          value: "{{master_url}}" +          value: "{{openshift_logging_master_url}}"          - name: "ES_HOST"            value: "{{openshift_logging_es_host}}"          - name: "ES_PORT" diff --git a/roles/openshift_logging/templates/kibana.j2 
b/roles/openshift_logging/templates/kibana.j2 index 3a9e03768..be9b45ab4 100644 --- a/roles/openshift_logging/templates/kibana.j2 +++ b/roles/openshift_logging/templates/kibana.j2 @@ -90,7 +90,7 @@ spec:               value: kibana-proxy              -               name: "OAP_MASTER_URL" -             value: {{master_url}} +             value: {{openshift_logging_master_url}}              -               name: "OAP_PUBLIC_MASTER_URL"               value: {{openshift_logging_master_public_url}} diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2 index 8be30a2c4..341ffdd84 100644 --- a/roles/openshift_logging/templates/route_reencrypt.j2 +++ b/roles/openshift_logging/templates/route_reencrypt.j2 @@ -11,6 +11,14 @@ metadata:  spec:    host: {{ route_host }}    tls: +{% if tls_key is defined and tls_key | length > 0 %} +    key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} +    certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %}      caCertificate: |  {% for line in tls_ca_cert.split('\n') %}        {{ line }} diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml index 11662c446..07cc05683 100644 --- a/roles/openshift_logging/vars/main.yaml +++ b/roles/openshift_logging/vars/main.yaml @@ -1,8 +1,8 @@  ---  openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}" -es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}" -es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}" -es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size/2 + 1}}" -es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size - 1}}" -es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}" +es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}" +es_recover_after_nodes: 
"{{openshift_logging_es_cluster_size|int - 1}}" +es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}" +es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}" +es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size|int - 1}}" +es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size|int}}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 9cd6b6c81..2ef61cddf 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -131,6 +131,10 @@  - name: Install the systemd units    include: systemd_units.yml +- name: Install Master system container +  include: system_container.yml +  when: openshift.common.is_containerized | bool and openshift.common.is_master_system_container | bool +  - name: Create session secrets file    template:      dest: "{{ openshift.master.session_secrets_file }}" diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml new file mode 100644 index 000000000..e3e3d7948 --- /dev/null +++ b/roles/openshift_master/tasks/system_container.yml @@ -0,0 +1,36 @@ +--- +- name: Pre-pull master system container image +  command: > +    atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} +  register: pull_result +  changed_when: "'Pulling layer' in pull_result.stdout" + +- name: Check Master system container package +  command: > +    atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master +  register: result + +- name: Update Master system container package +  command: > +    atomic containers update {{ openshift.common.service_type }}-master +  register: update_result +  changed_when: "'Extracting' in update_result.stdout" +  when: +    - ("master" in result.stdout) +    - (openshift.common.version is defined) and 
(openshift.common.version == openshift_version) | bool + +- name: Uninstall Master system container package +  command: > +    atomic uninstall {{ openshift.common.service_type }}-master +  failed_when: False +  when: +    - ("master" in result.stdout) +    - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) | bool + +- name: Install Master system container package +  command: > +    atomic install --system --name={{ openshift.common.service_type }}-master {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} +  when: +    - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) or ("master" not in result.stdout) | bool +  notify: +    - restart master diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 39ea42ab3..4ab98cbbb 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -20,14 +20,14 @@      docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Downloaded newer image' in pull_result.stdout" -  when: openshift.common.is_containerized | bool +  when: openshift.common.is_containerized | bool and not openshift.common.is_master_system_container | bool  # workaround for missing systemd unit files  - name: Create the systemd unit files    template:      src: "master_docker/master.docker.service.j2"      dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service" -  when: openshift.common.is_containerized | bool and (openshift.master.ha is not defined or not openshift.master.ha | bool) +  when: openshift.common.is_containerized | bool and (openshift.master.ha is not defined or not openshift.master.ha | bool and not openshift.common.is_master_system_container | bool)    register: create_master_unit_file  - command: 
systemctl daemon-reload @@ -132,7 +132,7 @@      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"      src: master_docker/master.docker.service.j2    register: install_result -  when: openshift.common.is_containerized | bool and openshift.master.ha is defined and not openshift.master.ha | bool +  when: openshift.common.is_containerized | bool and openshift.master.ha is defined and not openshift.master.ha | bool and not openshift.common.is_master_system_container | bool  - name: Preserve Master Proxy Config options    command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 9ae54dac1..cf2d2e103 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -65,7 +65,11 @@ dnsConfig:    bindNetwork: tcp4  {% endif %}  etcdClientInfo: +{% if openshift.common.version_gte_3_2_or_1_2 | bool %} +  ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} +{% else %}    ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} +{% endif %}    certFile: master.etcd-client.crt    keyFile: master.etcd-client.key    urls: @@ -79,12 +83,20 @@ etcdConfig:    peerServingInfo:      bindAddress: {{ openshift.master.bind_addr }}:7001      certFile: etcd.server.crt +{% if openshift.common.version_gte_3_2_or_1_2 | bool %} +    clientCA: ca-bundle.crt +{% else %}      clientCA: ca.crt +{% endif %}      keyFile: etcd.server.key    servingInfo:      bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}      certFile: etcd.server.crt +{% if openshift.common.version_gte_3_2_or_1_2 | bool %} +    clientCA: ca-bundle.crt +{% else %}      clientCA: ca.crt +{% endif %}      keyFile: etcd.server.key    storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd  
{% endif %} diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 17614f716..0cfbac8a9 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -23,6 +23,7 @@ openshift_metrics_cassandra_limits_cpu: null  openshift_metrics_cassandra_requests_memory: 1G  openshift_metrics_cassandra_requests_cpu: null  openshift_metrics_cassandra_nodeselector: "" +openshift_metrics_cassandra_storage_group: 65534  openshift_metrics_heapster_standalone: False  openshift_metrics_heapster_limits_memory: 3.75G @@ -31,6 +32,8 @@ openshift_metrics_heapster_requests_memory: 0.9375G  openshift_metrics_heapster_requests_cpu: null  openshift_metrics_heapster_nodeselector: "" +openshift_metrics_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}" +  openshift_metrics_duration: 7  openshift_metrics_resolution: 15s @@ -39,7 +42,6 @@ openshift_metrics_resolution: 15s  # overriding the values here  ##### -openshift_metrics_certs_dir: "{{ openshift.common.config_base }}/master/metrics"  openshift_metrics_master_url: https://kubernetes.default.svc.cluster.local  openshift_metrics_node_id: nodename  openshift_metrics_project: openshift-infra diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml index 4925275e8..f7cba0093 100644 --- a/roles/openshift_metrics/tasks/generate_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_certificates.yaml @@ -1,11 +1,11 @@  ---  - name: generate ca certificate chain -  shell: > +  command: >      {{ openshift.common.admin_binary }} ca create-signer-cert      --config={{ mktemp.stdout }}/admin.kubeconfig -    --key='{{ openshift_metrics_certs_dir }}/ca.key' -    --cert='{{ openshift_metrics_certs_dir }}/ca.crt' -    --serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt' +    --key='{{ mktemp.stdout }}/ca.key' +    --cert='{{ mktemp.stdout }}/ca.crt' +    
--serial='{{ mktemp.stdout }}/ca.serial.txt'      --name="metrics-signer@$(date +%s)" -  when: not '{{ openshift_metrics_certs_dir }}/ca.key' | exists +  - include: generate_hawkular_certificates.yaml diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 9333d341c..854697abb 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -13,13 +13,13 @@      hostnames: hawkular-cassandra    changed_when: no -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-truststore.pwd +- slurp: src={{ mktemp.stdout }}/hawkular-cassandra-truststore.pwd    register: cassandra_truststore_password -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-truststore.pwd +- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd    register: hawkular_truststore_password -- stat: path="{{openshift_metrics_certs_dir}}/{{item}}" +- stat: path="{{mktemp.stdout}}/{{item}}"    register: pwd_file_stat    with_items:    - hawkular-metrics.pwd @@ -32,44 +32,33 @@    with_items: "{{pwd_file_stat.results}}"    changed_when: no -- name: Create temp directory local on control node -  local_action: command mktemp -d -  register: local_tmp -  changed_when: False -  - name: generate password for hawkular metrics and jgroups    local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"    with_items:    - hawkular-metrics    - hawkular-jgroups-keystore -  when: "not pwd_files['{{ item }}.pwd'].exists"  - name: generate htpasswd file for hawkular metrics    local_action: >      shell htpasswd -ci      '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular      < '{{ local_tmp.stdout }}/hawkular-metrics.pwd' -  when: "not pwd_files['hawkular-metrics.htpasswd'].exists"  - name: copy local generated passwords to target    copy:      src: 
"{{local_tmp.stdout}}/{{item}}" -    dest: "{{openshift_metrics_certs_dir}}/{{item}}" +    dest: "{{mktemp.stdout}}/{{item}}"    with_items:    - hawkular-metrics.pwd    - hawkular-metrics.htpasswd    - hawkular-jgroups-keystore.pwd -  when: "not pwd_files['{{ item }}'].exists"  - include: import_jks_certs.yaml -- local_action: file path="{{local_tmp.stdout}}" state=absent -  changed_when: False -  - name: read files for the hawkular-metrics secret    shell: >      printf '%s: ' '{{ item }}' -    && base64 --wrap 0 '{{ openshift_metrics_certs_dir }}/{{ item }}' +    && base64 --wrap 0 '{{ mktemp.stdout }}/{{ item }}'    register: hawkular_secrets    with_items:    - ca.crt diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml index 2449b1518..ced2df1d0 100644 --- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml @@ -3,13 +3,12 @@    command: >      {{ openshift.common.admin_binary }} ca create-server-cert      --config={{ mktemp.stdout }}/admin.kubeconfig -    --key='{{ openshift_metrics_certs_dir }}/heapster.key' -    --cert='{{ openshift_metrics_certs_dir }}/heapster.cert' +    --key='{{ mktemp.stdout }}/heapster.key' +    --cert='{{ mktemp.stdout }}/heapster.cert'      --hostnames=heapster -    --signer-cert='{{ openshift_metrics_certs_dir }}/ca.crt' -    --signer-key='{{ openshift_metrics_certs_dir }}/ca.key' -    --signer-serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt' -  when: not '{{ openshift_metrics_certs_dir }}/heapster.key' | exists +    --signer-cert='{{ mktemp.stdout }}/ca.crt' +    --signer-key='{{ mktemp.stdout }}/ca.key' +    --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'  - when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"    block: @@ -17,11 +16,11 @@      slurp: src={{ item }}      register: heapster_secret      with_items: -    - 
"{{ openshift_metrics_certs_dir }}/heapster.cert" -    - "{{ openshift_metrics_certs_dir }}/heapster.key" +    - "{{ mktemp.stdout }}/heapster.cert" +    - "{{ mktemp.stdout }}/heapster.key"      - "{{ client_ca }}"      vars: -      custom_ca: "{{ openshift_metrics_certs_dir }}/heapster_client_ca.crt" +      custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt"        default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"        client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"    - name: generate heapster secret template diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml index 16fd8d9f8..57ec70c79 100644 --- a/roles/openshift_metrics/tasks/import_jks_certs.yaml +++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml @@ -1,37 +1,37 @@  --- -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore" +- stat: path="{{mktemp.stdout}}/hawkular-cassandra.keystore"    register: cassandra_keystore    check_mode: no -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.truststore" +- stat: path="{{mktemp.stdout}}/hawkular-cassandra.truststore"    register: cassandra_truststore    check_mode: no -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" +- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"    register: metrics_keystore    check_mode: no -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore" +- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore"    register: metrics_truststore    check_mode: no -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-jgroups.keystore" +- stat: path="{{mktemp.stdout}}/hawkular-jgroups.keystore"    register: jgroups_keystore    check_mode: no  - block: -  - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd +  - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd      register: metrics_keystore_password -  - slurp: 
src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd +  - slurp: src={{ mktemp.stdout }}/hawkular-cassandra-keystore.pwd      register: cassandra_keystore_password -  - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd +  - slurp: src={{ mktemp.stdout }}/hawkular-jgroups-keystore.pwd      register: jgroups_keystore_password    - fetch:        dest: "{{local_tmp.stdout}}/" -      src: "{{ openshift_metrics_certs_dir }}/{{item}}" +      src: "{{ mktemp.stdout }}/{{item}}"        flat: yes      changed_when: False      with_items: @@ -52,7 +52,7 @@      changed_when: False    - copy: -      dest: "{{openshift_metrics_certs_dir}}/" +      dest: "{{mktemp.stdout}}/"        src: "{{item}}"      with_fileglob: "{{local_tmp.stdout}}/*.*store" diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index 1ba11efa8..6b37f85ab 100644 --- a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -17,7 +17,7 @@    changed_when: false  - name: read hawkular-metrics route destination ca certificate -  slurp: src={{ openshift_metrics_certs_dir }}/ca.crt +  slurp: src={{ mktemp.stdout }}/ca.crt    register: metrics_route_dest_ca_cert    changed_when: false diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index d03d4176b..1eebff3bf 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -9,6 +9,11 @@    changed_when: False    when: "{{ openshift_metrics_install_metrics | bool }}" +- name: Create temp directory local on control node +  local_action: command mktemp -d +  register: local_tmp +  changed_when: False +  - name: Copy the admin client config(s)    command: >       cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig @@ -17,3 +22,9 @@    tags: metrics_init  - include: "{{ 
(openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }} + +- name: Delete temp directory +  local_action: file path="{{local_tmp.stdout}}" state=absent +  tags: metrics_cleanup +  changed_when: False +  check_mode: no diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml index 262acd546..2e2013d40 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -12,12 +12,6 @@    - openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types    - "not {{ openshift_metrics_heapster_standalone | bool }}" -- name: create certificate output directory -  file: -    path: "{{ openshift_metrics_certs_dir }}" -    state: directory -    mode: 0700 -  - name: list existing secrets    command: >      {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml index 5ca8f4462..199968579 100644 --- a/roles/openshift_metrics/tasks/setup_certificate.yaml +++ b/roles/openshift_metrics/tasks/setup_certificate.yaml @@ -3,50 +3,41 @@    command: >      {{ openshift.common.admin_binary }} ca create-server-cert      --config={{ mktemp.stdout }}/admin.kubeconfig -    --key='{{ openshift_metrics_certs_dir }}/{{ component }}.key' -    --cert='{{ openshift_metrics_certs_dir }}/{{ component }}.crt' +    --key='{{ mktemp.stdout }}/{{ component }}.key' +    --cert='{{ mktemp.stdout }}/{{ component }}.crt'      --hostnames='{{ hostnames }}' -    --signer-cert='{{ openshift_metrics_certs_dir }}/ca.crt' -    --signer-key='{{ openshift_metrics_certs_dir }}/ca.key' -    --signer-serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt' -  when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.key'|exists +    --signer-cert='{{ mktemp.stdout }}/ca.crt' +    --signer-key='{{ mktemp.stdout 
}}/ca.key' +    --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'  - slurp: src={{item}}    register: component_certs    with_items: -    - '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}.key' -    - '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}.crt' -  when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'|exists +    - '{{ mktemp.stdout | quote }}/{{ component|quote }}.key' +    - '{{ mktemp.stdout | quote }}/{{ component|quote }}.crt'  - name: generate {{ component }} certificate    copy: -    dest: '{{ openshift_metrics_certs_dir }}/{{ component }}.pem' +    dest: '{{ mktemp.stdout }}/{{ component }}.pem'      content: "{{ component_certs.results | map(attribute='content') | map('b64decode') | join('')  }}" -  when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'|exists  - name: generate random password for the {{ component }} keystore    copy:      content: "{{ 15 | oo_random_word }}" -    dest: '{{ openshift_metrics_certs_dir }}/{{ component }}-keystore.pwd' -  when: > -    not '{{ openshift_metrics_certs_dir }}/{{ component }}-keystore.pwd'|exists +    dest: '{{ mktemp.stdout }}/{{ component }}-keystore.pwd' -- slurp: src={{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}-keystore.pwd +- slurp: src={{ mktemp.stdout | quote }}/{{ component|quote }}-keystore.pwd    register: keystore_password  - name: create the {{ component }} pkcs12 from the pem file    command: >      openssl pkcs12 -export -    -in '{{ openshift_metrics_certs_dir }}/{{ component }}.pem' -    -out '{{ openshift_metrics_certs_dir }}/{{ component }}.pkcs12' +    -in '{{ mktemp.stdout }}/{{ component }}.pem' +    -out '{{ mktemp.stdout }}/{{ component }}.pkcs12'      -name '{{ component }}' -noiter -nomaciter      -password 'pass:{{keystore_password.content | b64decode }}' -  when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pkcs12'|exists  - name: generate random password for the {{ component }} truststore    
copy:      content: "{{ 15 | oo_random_word }}" -    dest: '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}-truststore.pwd' -  when: > -    not -    '{{ openshift_metrics_certs_dir | quote }}/{{ component| quote  }}-truststore.pwd'|exists +    dest: '{{ mktemp.stdout | quote }}/{{ component|quote }}-truststore.pwd' diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 index 6f6efc469..504476dc4 100644 --- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 @@ -19,6 +19,9 @@ spec:          type: hawkular-cassandra      spec:        serviceAccount: cassandra +      securityContext: +        supplementalGroups: +        - {{openshift_metrics_cassandra_storage_group}}  {% if node_selector is iterable and node_selector | length > 0 %}        nodeSelector:  {% for key, value in node_selector.iteritems() %} diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 5d64e0749..691227915 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -75,7 +75,7 @@  - name: Persist net.ipv4.ip_forward sysctl entry    sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes -- name: Start and enable openvswitch docker service +- name: Start and enable openvswitch service    systemd:      name: openvswitch.service      enabled: yes diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml new file mode 100644 index 000000000..01e2d33c7 --- /dev/null +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -0,0 +1,34 @@ +--- +- name: Pre-pull node system container image +  command: > +    atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} +  register: pull_result +  
changed_when: "'Pulling layer' in pull_result.stdout" + +- name: Check Node system container package +  command: > +    atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-node +  register: result + +- name: Update Node system container package +  command: > +    atomic containers update {{ openshift.common.service_type }}-node +  register: update_result +  changed_when: "'Extracting' in update_result.stdout" +  when: +  - (openshift.common.version is defined) and (openshift.common.version == openshift_version) and ("node" in result.stdout) | bool + +- name: Uninstall Node system container package +  command: > +    atomic uninstall {{ openshift.common.service_type }}-node +  failed_when: False +  when: +  - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) and ("node" in result.stdout) | bool + +- name: Install Node system container package +  command: > +    atomic install --system --name={{ openshift.common.service_type }}-node {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} +  register: install_node_result +  changed_when: "'Extracting' in pull_result.stdout" +  when: +  - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) or ("node" not in result.stdout) | bool diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml new file mode 100644 index 000000000..47fac99eb --- /dev/null +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -0,0 +1,36 @@ +--- +- name: Pre-pull OpenVSwitch system container image +  command: > +    atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} +  register: pull_result +  changed_when: "'Pulling layer' in pull_result.stdout" + +- name: Check OpenvSwitch system container package 
+  command: > +    atomic containers list --no-trunc -a -f container=openvswitch +  register: result +  when: +  - openshift.common.is_openvswitch_system_container | bool + +- name: Update OpenvSwitch system container package +  command: > +    atomic containers update openvswitch +  register: update_result +  changed_when: "'Extracting' in update_result.stdout" +  when: +  - (openshift.common.version is defined) and (openshift.common.version == openshift_version) and ("openvswitch" in result.stdout) | bool + +- name: Uninstall OpenvSwitch system container package +  command: > +    atomic uninstall openvswitch +  failed_when: False +  when: +  - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) and ("openvswitch" in result.stdout) | bool + +- name: Install OpenvSwitch system container package +  command: > +    atomic install --system --name=openvswitch {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} +  when: +  - (openshift.common.version is not defined) or (openshift.common.version != openshift_version) or ("openvswitch" not in result.stdout) | bool +  notify: +  - restart docker diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 5243a87fe..52482d09b 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -2,20 +2,6 @@  # This file is included both in the openshift_master role and in the upgrade  # playbooks. 
-- name: Pre-pull node image -  command: > -    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} -  register: pull_result -  changed_when: "'Downloaded newer image' in pull_result.stdout" -  when: openshift.common.is_containerized | bool - -- name: Pre-pull openvswitch image -  command: > -    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} -  register: pull_result -  changed_when: "'Downloaded newer image' in pull_result.stdout" -  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool -  - name: Install Node dependencies docker service file    template:      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service" @@ -23,12 +9,21 @@    register: install_node_dep_result    when: openshift.common.is_containerized | bool -- name: Install Node docker service file -  template: -    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" -    src: openshift.docker.node.service -  register: install_node_result -  when: openshift.common.is_containerized | bool +- block: +  - name: Pre-pull node image +    command: > +      docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} +    register: pull_result +    changed_when: "'Downloaded newer image' in pull_result.stdout" + +  - name: Install Node docker service file +    template: +      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" +      src: openshift.docker.node.service +    register: install_node_result +  when: +  - openshift.common.is_containerized | bool +  - not openshift.common.is_node_system_container | bool  - name: Create the openvswitch service env file    template: @@ -39,6 +34,19 @@    notify:    - restart openvswitch +- name: Install Node system container +  include: node_system_container.yml +  when: +  - openshift.common.is_containerized | bool +  - openshift.common.is_node_system_container | bool + +- name: Install OpenvSwitch system 
containers +  include: openvswitch_system_container.yml +  when: +  - openshift.common.use_openshift_sdn | default(true) | bool +  - openshift.common.is_containerized | bool +  - openshift.common.is_openvswitch_system_container | bool +  # May be a temporary workaround.  # https://bugzilla.redhat.com/show_bug.cgi?id=1331590  - name: Create OpenvSwitch service.d directory @@ -54,13 +62,23 @@    notify:    - restart openvswitch -- name: Install OpenvSwitch docker service file -  template: -    dest: "/etc/systemd/system/openvswitch.service" -    src: openvswitch.docker.service -  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool -  notify: -  - restart openvswitch +- block: +  - name: Pre-pull openvswitch image +    command: > +      docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} +    register: pull_result +    changed_when: "'Downloaded newer image' in pull_result.stdout" + +  - name: Install OpenvSwitch docker service file +    template: +      dest: "/etc/systemd/system/openvswitch.service" +      src: openvswitch.docker.service +    notify: +    - restart openvswitch +  when: +  - openshift.common.is_containerized | bool +  - openshift.common.use_openshift_sdn | default(true) | bool +  - not openshift.common.is_openvswitch_system_container | bool  - name: Configure Node settings    lineinfile: | 
