37 files changed, 746 insertions, 212 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index ead513c3d..e3f5491cd 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.36-1 ./ +3.0.37-1 ./ @@ -65,6 +65,9 @@ class Ohi(object): # We weren't able to determine what they wanted to do raise ArgumentError("Invalid combination of arguments") + if self.args.ip: + hosts = self.aws.convert_to_ip(hosts) + for host in sorted(hosts, key=utils.normalize_dnsname): if self.args.user: print "%s@%s" % (self.args.user, host) @@ -112,6 +115,9 @@ class Ohi(object): parser.add_argument('--v3', action='store_true', default=False, help='Specify the openshift version.') + parser.add_argument('--ip', action='store_true', default=False, + help='Return ip address only.') + parser.add_argument('--all-versions', action='store_true', default=False, help='Specify the openshift version. Return all versions') diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 3639ef733..945e6a20c 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -232,4 +232,14 @@ class AwsUtil(object): if version != 'all': retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), [])) - return retval + return list(retval) + + def convert_to_ip(self, hosts, cached=False): + """convert a list of host names to ip addresses""" + + inv = self.get_inventory(cached=cached) + ips = [] + for host in hosts: + ips.append(inv['_meta']['hostvars'][host]['oo_public_ip']) + + return ips @@ -13,7 +13,10 @@ Options: -p PAR, --par=PAR max number of parallel threads (OPTIONAL) --outdir=OUTDIR output directory for stdout files (OPTIONAL) --errdir=ERRDIR output directory for stderr files (OPTIONAL) + -c CLUSTER, --cluster CLUSTER + which cluster to use -e ENV, --env ENV which environment to use + --v3 When working with v3 environments. v2 by default -t HOST_TYPE, --host-type HOST_TYPE which host type to use --list-host-types list all of the host types @@ -61,12 +64,23 @@ while [ $# -gt 0 ] ; do shift # get past the value of the option ;; + -c) + shift # get past the option + CLUSTER=$1 + shift # get past the value of the option + ;; + -e) shift # get past the option ENV=$1 shift # get past the value of the option ;; + --v3) + OPENSHIFT_VERSION="--v3 --ip" + shift # get past the value of the option + ;; + --timeout) shift # get past the option TIMEOUT=$1 @@ -103,20 +117,26 @@ while [ $# -gt 0 ] ; do done # Get host list from ohi -if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$ENV" ] ; then - HOSTS="$(ohi -e "$ENV" 2>/dev/null)" - OHI_ECODE=$? -elif [ -n "$HOST_TYPE" ] ; then - HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)" +CMD="" +if [ -n "$CLUSTER" ] ; then + CMD="$CMD -c $CLUSTER" +fi + +if [ -n "$ENV" ] ; then + CMD="$CMD -e $ENV" +fi + +if [ -n "$HOST_TYPE" ] ; then + CMD="$CMD -t $HOST_TYPE" +fi + +if [ -n "$OPENSHIFT_VERSION" ] ; then + CMD="$CMD $OPENSHIFT_VERSION" +fi + +if [ -n "$CMD" ] ; then + HOSTS="$(ohi $CMD 2>/dev/null)" OHI_ECODE=$? 
-else - echo - echo "Error: either -e or -t must be specified" - echo - exit 10 fi if [ $OHI_ECODE -ne 0 ] ; then @@ -56,9 +56,9 @@ fi PAR=200 USER=root TIMEOUT=0 -ARGS=() ENV="" HOST_TYPE="" + while [ $# -gt 0 ] ; do case $1 in -t|--host-type) @@ -80,7 +80,7 @@ while [ $# -gt 0 ] ; do ;; --v3) - OPENSHIFT_VERSION="--v3" + OPENSHIFT_VERSION="--v3 --ip" shift # get past the value of the option ;; diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index ae275b051..2b39bb59e 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,9 +1,9 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 -''' +""" Custom filters for use in openshift-ansible -''' +""" from ansible import errors from operator import itemgetter @@ -15,26 +15,29 @@ import json import yaml from ansible.utils.unicode import to_unicode +# Disabling too-many-public-methods, since filter methods are necessarily +# public +# pylint: disable=too-many-public-methods class FilterModule(object): - ''' Custom ansible filters ''' + """ Custom ansible filters """ @staticmethod def oo_pdb(arg): - ''' This pops you into a pdb instance where arg is the data passed in + """ This pops you into a pdb instance where arg is the data passed in from the filter. Ex: "{{ hostvars | oo_pdb }}" - ''' + """ pdb.set_trace() return arg @staticmethod def get_attr(data, attribute=None): - ''' This looks up dictionary attributes of the form a.b.c and returns + """ This looks up dictionary attributes of the form a.b.c and returns the value. Ex: data = {'a': {'b': {'c': 5}}} attribute = "a.b.c" returns 5 - ''' + """ if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") @@ -46,16 +49,16 @@ class FilterModule(object): @staticmethod def oo_flatten(data): - ''' This filter plugin will flatten a list of lists - ''' - if not issubclass(type(data), list): + """ This filter plugin will flatten a list of lists + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to flatten a List") return [item for sublist in data for item in sublist] @staticmethod def oo_collect(data, attribute=None, filters=None): - ''' This takes a list of dict and collects all attributes specified into a + """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that match _ALL_ of filters. If a dict entry is missing the key in a filter it will be excluded from the match. 
@@ -67,15 +70,15 @@ class FilterModule(object): attribute = 'a' filters = {'z': 'z'} returns [1, 2, 3] - ''' - if not issubclass(type(data), list): + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a List") if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") if filters is not None: - if not issubclass(type(filters), dict): + if not isinstance(filters, dict): raise errors.AnsibleFilterError("|failed expects filter to be a" " dict") retval = [FilterModule.get_attr(d, attribute) for d in data if ( @@ -87,16 +90,16 @@ class FilterModule(object): @staticmethod def oo_select_keys_from_list(data, keys): - ''' This returns a list, which contains the value portions for the keys + """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] - ''' + """ - if not issubclass(type(data), list): + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") - if not issubclass(type(keys), list): + if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in @@ -106,16 +109,16 @@ class FilterModule(object): @staticmethod def oo_select_keys(data, keys): - ''' This returns a list, which contains the value portions for the keys + """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] - ''' + """ - if not issubclass(type(data), dict): + if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects to filter on a dict") - if not issubclass(type(keys), list): + if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in @@ -125,13 +128,13 @@ class FilterModule(object): @staticmethod def oo_prepend_strings_in_list(data, prepend): - ''' This takes a list of strings and prepends a string to each item in the + """ This takes a list of strings and prepends a string to each item in the list Ex: data = ['cart', 'tree'] prepend = 'apple-' returns ['apple-cart', 'apple-tree'] - ''' - if not issubclass(type(data), list): + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not all(isinstance(x, basestring) for x in data): raise errors.AnsibleFilterError("|failed expects first param is a list" @@ -141,10 +144,10 @@ class FilterModule(object): @staticmethod def oo_combine_key_value(data, joiner='='): - '''Take a list of dict in the form of { 'key': 'value'} and + """Take a list of dict in the form of { 'key': 'value'} and arrange them as a list of strings ['key=value'] - ''' - if not issubclass(type(data), list): + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") rval = [] @@ -155,20 +158,20 @@ class FilterModule(object): @staticmethod def oo_combine_dict(data, in_joiner='=', out_joiner=' '): - '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and + """Take a dict in the form of { 'key': 'value', 'key': 'value' } and arrange them as a string 'key=value key=value' - ''' - if not issubclass(type(data), dict): + """ + if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict") return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()]) 
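# Editor's aside, not part of the patch: the issubclass(type(x), ...) checks
# above are being swapped for isinstance(x, ...). For these argument checks the
# two behave the same; isinstance is simply the idiomatic spelling, also
# accepts a tuple of types, and honors abc registration. A minimal illustration:
class _SubList(list):
    pass

_data = _SubList([1, 2, 3])
print(issubclass(type(_data), list))     # True, but roundabout
print(isinstance(_data, list))           # True, idiomatic and equivalent here
print(isinstance(_data, (list, tuple)))  # isinstance also accepts a tuple of types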
@staticmethod def oo_ami_selector(data, image_name): - ''' This takes a list of amis and an image name and attempts to return + """ This takes a list of amis and an image name and attempts to return the latest ami. - ''' - if not issubclass(type(data), list): + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not data: @@ -184,7 +187,7 @@ class FilterModule(object): @staticmethod def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False): - ''' This takes a dictionary of volume definitions and returns a valid ec2 + """ This takes a dictionary of volume definitions and returns a valid ec2 volume definition based on the host_type and the values in the dictionary. The dictionary should look similar to this: @@ -209,8 +212,8 @@ class FilterModule(object): } } } - ''' - if not issubclass(type(data), dict): + """ + if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict") if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" @@ -243,15 +246,15 @@ class FilterModule(object): @staticmethod def oo_split(string, separator=','): - ''' This splits the input string into a list - ''' + """ This splits the input string into a list + """ return string.split(separator) @staticmethod def oo_haproxy_backend_masters(hosts): - ''' This takes an array of dicts and returns an array of dicts + """ This takes an array of dicts and returns an array of dicts to be used as a backend for the haproxy role - ''' + """ servers = [] for idx, host_info in enumerate(hosts): server = dict(name="master%s" % idx) @@ -264,7 +267,7 @@ class FilterModule(object): @staticmethod def oo_filter_list(data, filter_attr=None): - ''' This returns a list, which contains all items where filter_attr + """ This returns a list, which contains all items where filter_attr evaluates to true Ex: data = [ { a: 1, b: True }, { a: 3, b: False }, @@ -272,19 +275,81 @@ class FilterModule(object): filter_attr = 'b' returns [ { a: 1, b: True }, { a: 5, b: True } ] - ''' - if not issubclass(type(data), list): + """ + if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") - if not issubclass(type(filter_attr), str): - raise errors.AnsibleFilterError("|failed expects filter_attr is a str") + if not isinstance(filter_attr, basestring): + raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode") # Gather up the values for the list of keys passed in return [x for x in data if x.has_key(filter_attr) and x[filter_attr]] @staticmethod + def oo_nodes_with_label(nodes, label, value=None): + """ Filters a list of nodes by label and value (if provided) + + It handles labels that are in the following variables by priority: + openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels'] + + Examples: + data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, + 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, + 'c': {'openshift_node_labels': {'size': 'S'}}] + label = 'color' + returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, + 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}] + + data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, + 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, + 'c': {'openshift_node_labels': {'size': 'S'}}] + label = 'color' + value = 'green' + returns = ['b': {'labels': 
{'color': 'green', 'size': 'L'}}] + + Args: + nodes (list[dict]): list of node to node variables + label (str): label to filter `nodes` by + value (Optional[str]): value of `label` to filter by Defaults + to None. + + Returns: + list[dict]: nodes filtered by label and value (if provided) + """ + if not isinstance(nodes, list): + raise errors.AnsibleFilterError("failed expects to filter on a list") + if not isinstance(label, basestring): + raise errors.AnsibleFilterError("failed expects label to be a string") + if value is not None and not isinstance(value, basestring): + raise errors.AnsibleFilterError("failed expects value to be a string") + + def label_filter(node): + """ filter function for testing if node should be returned """ + if not isinstance(node, dict): + raise errors.AnsibleFilterError("failed expects to filter on a list of dicts") + if 'openshift_node_labels' in node: + labels = node['openshift_node_labels'] + elif 'cli_openshift_node_labels' in node: + labels = node['cli_openshift_node_labels'] + elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']: + labels = node['openshift']['node']['labels'] + else: + return False + + if isinstance(labels, basestring): + labels = yaml.safe_load(labels) + if not isinstance(labels, dict): + raise errors.AnsibleFilterError( + "failed expected node labels to be a dict or serializable to a dict" + ) + return label in labels and (value is None or labels[label] == value) + + return [n for n in nodes if label_filter(n)] + + + @staticmethod def oo_parse_heat_stack_outputs(data): - ''' Formats the HEAT stack output into a usable form + """ Formats the HEAT stack output into a usable form The goal is to transform something like this: @@ -323,7 +388,7 @@ class FilterModule(object): "value_B2" ] } - ''' + """ # Extract the “outputs” JSON snippet from the pretty-printed array in_outputs = False @@ -352,7 +417,7 @@ class FilterModule(object): @staticmethod # pylint: disable=too-many-branches def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): - ''' Parses names from list of certificate hashes. + """ Parses names from list of certificate hashes. Ex: certificates = [{ "certfile": "/root/custom1.crt", "keyfile": "/root/custom1.key" }, @@ -366,11 +431,11 @@ class FilterModule(object): { "certfile": "/etc/origin/master/named_certificates/custom2.crt", "keyfile": "/etc/origin/master/named_certificates/custom2.key", "names": [ "some-hostname.com" ] }] - ''' - if not issubclass(type(named_certs_dir), unicode): - raise errors.AnsibleFilterError("|failed expects named_certs_dir is unicode") + """ + if not isinstance(named_certs_dir, basestring): + raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode") - if not issubclass(type(internal_hostnames), list): + if not isinstance(internal_hostnames, list): raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") for certificate in certificates: @@ -410,7 +475,7 @@ class FilterModule(object): @staticmethod def oo_pretty_print_cluster(data): - ''' Read a subset of hostvars and build a summary of the cluster + """ Read a subset of hostvars and build a summary of the cluster in the following layout: "c_id": { @@ -427,14 +492,14 @@ class FilterModule(object): ... ] } - ''' + """ def _get_tag_value(tags, key): - ''' Extract values of a map implemented as a set. + """ Extract values of a map implemented as a set. 
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' } key = 'bar' returns 'value2' - ''' + """ for tag in tags: if tag[:len(key)+4] == 'tag_' + key: return tag[len(key)+5:] @@ -445,7 +510,7 @@ class FilterModule(object): host_type, sub_host_type, host): - ''' Add a new host in the clusters data structure ''' + """ Add a new host in the clusters data structure """ if clusterid not in clusters: clusters[clusterid] = {} if host_type not in clusters[clusterid]: @@ -470,9 +535,9 @@ class FilterModule(object): @staticmethod def oo_generate_secret(num_bytes): - ''' generate a session secret ''' + """ generate a session secret """ - if not issubclass(type(num_bytes), int): + if not isinstance(num_bytes, int): raise errors.AnsibleFilterError("|failed expects num_bytes is int") secret = os.urandom(num_bytes) @@ -480,7 +545,7 @@ class FilterModule(object): @staticmethod def to_padded_yaml(data, level=0, indent=2, **kw): - ''' returns a yaml snippet padded to match the indent level you specify ''' + """ returns a yaml snippet padded to match the indent level you specify """ if data in [None, ""]: return "" @@ -492,7 +557,7 @@ class FilterModule(object): raise errors.AnsibleFilterError('Failed to convert: %s', my_e) def filters(self): - ''' returns a mapping of filters to methods ''' + """ returns a mapping of filters to methods """ return { "oo_select_keys": self.oo_select_keys, "oo_select_keys_from_list": self.oo_select_keys_from_list, @@ -512,4 +577,5 @@ class FilterModule(object): "oo_pretty_print_cluster": self.oo_pretty_print_cluster, "oo_generate_secret": self.oo_generate_secret, "to_padded_yaml": self.to_padded_yaml, + "oo_nodes_with_label": self.oo_nodes_with_label, } diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 1a1445835..051a6d966 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.36 +Version: 3.0.37 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,40 @@ Atomic OpenShift Utilities includes %changelog +* Fri Jan 29 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.37-1 +- Adding ip address option (kwoodson@redhat.com) +- Enable cockpit when not is_atomic. (abutcher@redhat.com) +- Explicitly restart the atomic node service after configuring it for nuage + (vishal.patil@nuagenetworks.net) +- Fix for bug 1298 (vishal.patil@nuagenetworks.net) +- fixing logic for skipping symlinks (kwoodson@redhat.com) +- Allow to have custom bucket name and region (florian.lambert@enovance.com) +- Add inventory example for logrotate_scripts (abutcher@redhat.com) +- Minor readme cleanup for Bug 1271566 (bleanhar@redhat.com) +- fix template trigger calc (jdiaz@redhat.com) +- Configure logrotate on atomic. (abutcher@redhat.com) +- Comparing zbx_host interfaces and removing duplicate hostgroup_names + (kwoodson@redhat.com) +- Dockerfile: Require pyOpenSSL (gscrivan@redhat.com) +- replace yum with dnf (spartacus06@gmail.com) +- Install cockpit, logrotate and fluentd unless host is atomic. 
+ (abutcher@redhat.com) +- zabbix: added the skydns items and triggers (mwoodson@redhat.com) +- fix pkg_version (spinolacastro@gmail.com) +- Expose data_dir (spinolacastro@gmail.com) +- Fix checking for update package availability (nikolai@prokoschenko.de) +- Fix oo_pretty_print_cluster following the renaming of `env` into `clusterid` + (lhuard@amadeus.com) +- Ensure openssl present for etcd_ca (jdetiber@redhat.com) +- Update Docs and test for testing ansible version (jdetiber@redhat.com) +- Add Nuage support to openshift ansible (vishpat@gmail.com) +- Updating for host monitoring HA masters (kwoodson@redhat.com) +- adhoc s3 registry - add auth part in the registry config sample + (gael.lambert@enovance.com) +- Move the `is_atomic` check from `update_repos_and_packages.yml` to + `rhel_subscribe` (lhuard@amadeus.com) +- Increase OpenStack stack creation/deletion timeout (lhuard@amadeus.com) + * Mon Jan 25 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.36-1 - Fixing awsutil to support aliases and v3 (kwoodson@redhat.com) - Fail when master restart playbook finds no active masters rather than any diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index a19291a9f..b6dde357e 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -57,7 +57,7 @@ # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - name: update zabbix docker items - command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py + command: docker exec -i oso-rhel7-host-monitoring /usr/local/bin/cron-send-docker-metrics.py # Get and show docker info again. 
- name: Get docker info diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index abdb23d78..33fcf6af5 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,4 +1,3 @@ ---- - include: ../../common/openshift-cluster/config.yml vars_files: - ../../aws/openshift-cluster/vars.yml @@ -10,5 +9,8 @@ openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" + openshift_router_selector: 'type=infra' + openshift_infra_nodes: "{{ g_infra_hosts }}" + openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}' + openshift_master_cluster_method: 'native' diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml index c2135cd03..7e3a47964 100644 --- a/playbooks/aws/openshift-cluster/scaleup.yml +++ b/playbooks/aws/openshift-cluster/scaleup.yml @@ -29,5 +29,4 @@ openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 4b9c80b14..c20f370bf 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -74,4 +74,4 @@ tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" with_items: ec2_stop.results - when: "'oo_hosts_to_terminate' in groups" + when: ec2_stop | changed diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index 88736ee03..1474bb3ca 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -8,6 +8,5 @@ ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] - and not openshift.common.is_atomic | bool - openshift_repos - os_update_latest diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index a0d21451f..6f86703d6 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -164,6 +164,11 @@ | list ) }}" master_cert_subdir: master-{{ openshift.common.hostname }} master_cert_config_dir: "{{ openshift.common.config_base }}/master" + - set_fact: + openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) + | oo_nodes_with_label('region', 'infra') + | oo_collect('inventory_hostname') }}" + when: openshift_infra_nodes is not defined - name: Configure master certificates hosts: oo_first_master @@ -368,7 +373,7 @@ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" roles: - role: cockpit - when: not openshift.common.is_containerized and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and + when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit 
| bool or osm_use_cockpit is undefined ) - name: Configure flannel diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index dc3c25107..81ec9ab6d 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -206,6 +206,7 @@ hosts: oo_nodes_to_config vars: # TODO: Prefix flannel role variables. + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" roles: diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 84a3f84d4..8bc9b1e53 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -13,3 +13,6 @@ openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" + openshift_router_selector: 'type=infra' + openshift_infra_nodes: "{{ g_infra_hosts }}" + openshift_master_cluster_method: 'native' diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index be9cbbfaa..9bd99c4fc 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -13,3 +13,6 @@ openshift_cluster_id: "{{ cluster_id }}" openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" + openshift_router_selector: 'type=infra' + openshift_infra_nodes: "{{ g_infra_hosts }}" + openshift_master_cluster_method: 'native' diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index b338d2eb4..5128e767f 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -11,3 +11,6 @@ openshift_debug_level: "{{ debug_level }}" openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ansible_default_ipv4.address }}" + openshift_router_selector: 'type=infra' + openshift_infra_nodes: "{{ g_infra_hosts }}" + openshift_master_cluster_method: 'native' diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index fdcb77acc..76cc64a73 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -29,6 +29,7 @@ - name: Create or Update OpenStack Stack command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} + --timeout 3 --enable-rollback -P cluster_env={{ cluster_env }} -P cluster_id={{ cluster_id }} -P cidr={{ openstack_network_cidr }} @@ -56,7 +57,7 @@ register: stack_show_status_result until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] retries: 30 - delay: 1 + delay: 5 failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - name: Read OpenStack Stack outputs diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index d4ab51fa7..7a86b78c5 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -43,6 +43,6 @@ register: stack_show_result until: stack_show_result.stdout != 'DELETE_IN_PROGRESS' retries: 60 - delay: 1 + delay: 5 failed_when: '"Stack not found" not 
in stack_show_result.stderr and stack_show_result.stdout != "DELETE_COMPLETE"' diff --git a/roles/lib_dyn/README.md b/roles/lib_dyn/README.md new file mode 100644 index 000000000..1eec9f81c --- /dev/null +++ b/roles/lib_dyn/README.md @@ -0,0 +1,27 @@ +lib_dyn +========= + +A role containing the dyn_record module for managing DNS records through Dyn's +API + +Requirements +------------ + +The module requires the `dyn` python module for interacting with the Dyn API. +https://github.com/dyninc/dyn-python + +Example Playbook +---------------- + +To make sure the `dyn_record` module is available for use include the role +before it is used. + + - hosts: servers + roles: + - lib_dyn + +License +------- + +Apache + diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py new file mode 100644 index 000000000..5e088a674 --- /dev/null +++ b/roles/lib_dyn/library/dyn_record.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# (c) 2015, Russell Harrison <rharriso@redhat.com> +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +'''Ansible module to manage records in the Dyn Managed DNS service''' +DOCUMENTATION = ''' +--- +module: dyn_record +version_added: "1.9" +short_description: Manage records in the Dyn Managed DNS service. +description: + - "Manages DNS records via the REST API of the Dyn Managed DNS service. It + - "handles records only; there is no manipulation of zones or account support" + - "yet. See: U(https://help.dyn.com/dns-api-knowledge-base/)" +options: + state: + description: + -"Whether the record should be c(present) or c(absent). Optionally the" + - "state c(list) can be used to return the current value of a record." + required: true + choices: [ 'present', 'absent', 'list' ] + default: present + + customer_name: + description: + - "The Dyn customer name for your account. If not set the value of the" + - "c(DYNECT_CUSTOMER_NAME) environment variable is used." + required: false + default: nil + + user_name: + description: + - "The Dyn user name to log in with. If not set the value of the" + - "c(DYNECT_USER_NAME) environment variable is used." + required: false + default: null + + user_password: + description: + - "The Dyn user's password to log in with. If not set the value of the" + - "c(DYNECT_PASSWORD) environment variable is used." + required: false + default: null + + zone: + description: + - "The DNS zone in which your record is located." + required: true + default: null + + record_fqdn: + description: + - "Fully qualified domain name of the record name to get, create, delete," + - "or update." + required: true + default: null + + record_type: + description: + - "Record type." + required: true + choices: [ 'A', 'AAAA', 'CNAME', 'PTR', 'TXT' ] + default: null + + record_value: + description: + - "Record value. If record_value is not specified; no changes will be" + - "made and the module will fail" + required: false + default: null + + record_ttl: + description: + - 'Record's "Time to live". 
Number of seconds the record remains cached' + - 'in DNS servers or c(0) to use the default TTL for the zone.' + required: false + default: 0 + +notes: + - The module makes a broad assumption that there will be only one record per "node" (FQDN). + - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. + +requirements: [ dyn ] +author: "Russell Harrison" +''' + +try: + IMPORT_ERROR = False + from dyn.tm.session import DynectSession + from dyn.tm.zones import Zone + import dyn.tm.errors + import os + +except ImportError as error: + IMPORT_ERROR = str(error) + +# Each of the record types use a different method for the value. +RECORD_PARAMS = { + 'A' : {'value_param': 'address'}, + 'AAAA' : {'value_param': 'address'}, + 'CNAME' : {'value_param': 'cname'}, + 'PTR' : {'value_param': 'ptrdname'}, + 'TXT' : {'value_param': 'txtdata'} +} + +# You'll notice that the value_param doesn't match the key (records_key) +# in the dict returned from Dyn when doing a dyn_node.get_all_records() +# This is a frustrating lookup dict to allow mapping to the RECORD_PARAMS +# dict so we can lookup other values in it efficiently + +def get_record_type(record_key): + '''Get the record type represented by the keys returned from get_any_records.''' + return record_key.replace('_records', '').upper() + +def get_record_key(record_type): + '''Get the key to look up records in the dictionary returned from get_any_records.''' + return record_type.lower() + '_records' + +def get_any_records(module, node): + '''Get any records for a given node''' + # Lets get a list of the A records for the node + try: + records = node.get_any_records() + except dyn.tm.errors.DynectGetError as error: + if 'Not in zone' in str(error): + # The node isn't in the zone so we'll return an empty dictionary + return {} + else: + # An unknown error happened so we'll need to return it. 
+ module.fail_json(msg='Unable to get records', + error=str(error)) + + # Return a dictionary of the record objects + return records + +def get_record_values(records): + '''Get the record values for each record returned by get_any_records.''' + # This simply returns the values from a dictionary of record objects + ret_dict = {} + for key in records.keys(): + record_type = get_record_type(key) + record_value_param = RECORD_PARAMS[record_type]['value_param'] + ret_dict[key] = [getattr(elem, record_value_param) for elem in records[key]] + return ret_dict + +def main(): + '''Ansible module for managing Dyn DNS records.''' + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['present', 'absent', 'list']), + customer_name=dict(default=os.environ.get('DYNECT_CUSTOMER_NAME', None), type='str'), + user_name=dict(default=os.environ.get('DYNECT_USER_NAME', None), type='str', no_log=True), + user_password=dict(default=os.environ.get('DYNECT_PASSWORD', None), type='str', no_log=True), + zone=dict(required=True), + record_fqdn=dict(required=False), + record_type=dict(required=False, choices=[ + 'A', 'AAAA', 'CNAME', 'PTR', 'TXT']), + record_value=dict(required=False), + record_ttl=dict(required=False, default=0, type='int'), + ), + required_together=( + ['record_fqdn', 'record_value', 'record_ttl', 'record_type'] + ) + ) + + if IMPORT_ERROR: + module.fail_json(msg="Unable to import dyn module: https://pypi.python.org/pypi/dyn", + error=IMPORT_ERROR) + + # Start the Dyn session + try: + _ = DynectSession(module.params['customer_name'], + module.params['user_name'], + module.params['user_password']) + except dyn.tm.errors.DynectAuthError as error: + module.fail_json(msg='Unable to authenticate with Dyn', + error=str(error)) + + # Retrieve zone object + try: + dyn_zone = Zone(module.params['zone']) + except dyn.tm.errors.DynectGetError as error: + if 'No such zone' in str(error): + module.fail_json( + msg="Not a valid zone for this account", + zone=module.params['zone'] + ) + else: + module.fail_json(msg="Unable to retrieve zone", + error=str(error)) + + + # To retrieve the node object we need to remove the zone name from the FQDN + dyn_node_name = module.params['record_fqdn'].replace('.' + module.params['zone'], '') + + # Retrieve the zone object from dyn + dyn_zone = Zone(module.params['zone']) + + # Retrieve the node object from dyn + dyn_node = dyn_zone.get_node(node=dyn_node_name) + + # All states will need a list of the exiting records for the zone. + dyn_node_records = get_any_records(module, dyn_node) + + if module.params['state'] == 'list': + module.exit_json(changed=False, + records=get_record_values( + dyn_node_records, + )) + + if module.params['state'] == 'present': + + # First get a list of existing records for the node + values = get_record_values(dyn_node_records) + value_key = get_record_key(module.params['record_type']) + + # Check to see if the record is already in place before doing anything. + if (dyn_node_records and + dyn_node_records[value_key][0].ttl == module.params['record_ttl'] and + module.params['record_value'] in values[value_key]): + + module.exit_json(changed=False) + + + # Working on the assumption that there is only one record per + # node we will first delete the node if there are any records before + # creating the correct record + if dyn_node_records: + dyn_node.delete() + + # Now lets create the correct node entry. 
+ dyn_zone.add_record(dyn_node_name, + module.params['record_type'], + module.params['record_value'], + module.params['record_ttl'] + ) + + # Now publish the zone since we've updated it. + dyn_zone.publish() + module.exit_json(changed=True, + msg="Created node %s in zone %s" % (dyn_node_name, module.params['zone'])) + + if module.params['state'] == 'absent': + # If there are any records present we'll want to delete the node. + if dyn_node_records: + dyn_node.delete() + # Publish the zone since we've modified it. + dyn_zone.publish() + module.exit_json(changed=True, + msg="Removed node %s from zone %s" % (dyn_node_name, module.params['zone'])) + else: + module.exit_json(changed=False) + +# Ansible tends to need a wild card import so we'll use it here +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/roles/lib_dyn/meta/main.yml b/roles/lib_dyn/meta/main.yml new file mode 100644 index 000000000..5475c6971 --- /dev/null +++ b/roles/lib_dyn/meta/main.yml @@ -0,0 +1,33 @@ +--- +galaxy_info: + author: Russell Harrison + description: A role to provide the dyn_record module + company: Red Hat, Inc. + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + license: Apache + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + #- name: Fedora + # versions: + # - 19 + # - 20 + # - 21 + # - 22 + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + categories: + - networking +dependencies: [] + # List your role dependencies here, one per line. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + # + # No role dependencies at this time. The module contained in this role does + # require the dyn python module. 
+ # https://pypi.python.org/pypi/dyn + diff --git a/roles/lib_dyn/tasks/main.yml b/roles/lib_dyn/tasks/main.yml new file mode 100644 index 000000000..9b3b1b0b9 --- /dev/null +++ b/roles/lib_dyn/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# tasks file for lib_dyn + +- name: Make sure python-dyn is installed + yum: name=python-dyn state=present diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml index d82d4b67b..25482a845 100644 --- a/roles/nuage_node/handlers/main.yaml +++ b/roles/nuage_node/handlers/main.yaml @@ -2,3 +2,7 @@ - name: restart vrs sudo: true service: name=openvswitch state=restarted + +- name: restart node + sudo: true + service: name={{ openshift.common.service_type }}-node state=restarted diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json index 11767862d..68438b538 100644 --- a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json @@ -87,6 +87,13 @@ { "name": "mongodb", "image": "mongodb", + "readinessProbe": { + "tcpSocket":{ + "port": 27017 + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 1 + }, "ports": [ { "containerPort": 27017, diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json index 97b315600..e90ed6fa8 100644 --- a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json @@ -104,6 +104,13 @@ { "name": "mongodb", "image": "mongodb", + "readinessProbe": { + "tcpSocket":{ + "port": 27017 + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 1 + }, "ports": [ { "containerPort": 27017, diff --git a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json index 51805d729..a327c0215 100644 --- a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json @@ -15,8 +15,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "2.2" + "kind": "ImageStreamTag", + "name": "2.2" } }, { @@ -30,8 +30,8 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/ruby-20-centos7:latest" + "kind": "DockerImage", + "name": "openshift/ruby-20-centos7:latest" } }, { @@ -45,8 +45,8 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "centos/ruby-22-centos7:latest" + "kind": "DockerImage", + "name": "centos/ruby-22-centos7:latest" } } ] @@ -64,8 +64,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "0.10" + "kind": "ImageStreamTag", + "name": "0.10" } }, { @@ -79,8 +79,8 @@ "sampleRepo": "https://github.com/openshift/nodejs-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/nodejs-010-centos7:latest" + "kind": "DockerImage", + "name": "openshift/nodejs-010-centos7:latest" } } ] @@ -98,8 +98,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.20" + "kind": "ImageStreamTag", + "name": "5.20" } }, 
{ @@ -113,8 +113,8 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/perl-516-centos7:latest" + "kind": "DockerImage", + "name": "openshift/perl-516-centos7:latest" } }, { @@ -128,8 +128,8 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "centos/perl-520-centos7:latest" + "kind": "DockerImage", + "name": "centos/perl-520-centos7:latest" } } @@ -148,8 +148,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.6" + "kind": "ImageStreamTag", + "name": "5.6" } }, { @@ -163,8 +163,8 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/php-55-centos7:latest" + "kind": "DockerImage", + "name": "openshift/php-55-centos7:latest" } }, { @@ -178,8 +178,8 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "centos/php-56-centos7:latest" + "kind": "DockerImage", + "name": "centos/php-56-centos7:latest" } } ] @@ -197,8 +197,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "3.4" + "kind": "ImageStreamTag", + "name": "3.4" } }, { @@ -212,8 +212,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/python-33-centos7:latest" + "kind": "DockerImage", + "name": "openshift/python-33-centos7:latest" } }, { @@ -227,8 +227,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "centos/python-27-centos7:latest" + "kind": "DockerImage", + "name": "centos/python-27-centos7:latest" } }, { @@ -242,8 +242,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "centos/python-34-centos7:latest" + "kind": "DockerImage", + "name": "centos/python-34-centos7:latest" } } ] @@ -261,8 +261,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "8.1" + "kind": "ImageStreamTag", + "name": "8.1" } }, { @@ -276,8 +276,8 @@ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git" }, "from": { - "Kind": "DockerImage", - "Name": "openshift/wildfly-81-centos7:latest" + "kind": "DockerImage", + "name": "openshift/wildfly-81-centos7:latest" } } ] @@ -295,22 +295,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.6" + "kind": "ImageStreamTag", + "name": "5.6" } }, { "name": "5.5", "from": { - "Kind": "DockerImage", - "Name": "openshift/mysql-55-centos7:latest" + "kind": "DockerImage", + "name": "openshift/mysql-55-centos7:latest" } }, { "name": "5.6", "from": { - "Kind": "DockerImage", - "Name": "centos/mysql-56-centos7:latest" + "kind": "DockerImage", + "name": "centos/mysql-56-centos7:latest" } } ] @@ -328,22 +328,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "9.4" + "kind": "ImageStreamTag", + "name": "9.4" } }, { "name": "9.2", "from": { - "Kind": "DockerImage", - "Name": "openshift/postgresql-92-centos7:latest" + "kind": "DockerImage", + "name": "openshift/postgresql-92-centos7:latest" } }, { "name": "9.4", "from": { - "Kind": "DockerImage", - "Name": "centos/postgresql-94-centos7:latest" + "kind": "DockerImage", + "name": "centos/postgresql-94-centos7:latest" } } ] @@ -361,22 +361,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "2.6" + "kind": "ImageStreamTag", + "name": "2.6" } }, { "name": "2.4", "from": { - "Kind": "DockerImage", - "Name": 
"openshift/mongodb-24-centos7:latest" + "kind": "DockerImage", + "name": "openshift/mongodb-24-centos7:latest" } }, { "name": "2.6", "from": { - "Kind": "DockerImage", - "Name": "centos/mongodb-26-centos7:latest" + "kind": "DockerImage", + "name": "centos/mongodb-26-centos7:latest" } } ] @@ -394,15 +394,15 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "1" + "kind": "ImageStreamTag", + "name": "1" } }, { "name": "1", "from": { - "Kind": "DockerImage", - "Name": "openshift/jenkins-1-centos7:latest" + "kind": "DockerImage", + "name": "openshift/jenkins-1-centos7:latest" } } ] diff --git a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json index 3092ee486..3f5f713b4 100644 --- a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json @@ -15,8 +15,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "2.2" + "kind": "ImageStreamTag", + "name": "2.2" } }, { @@ -30,8 +30,8 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest" } }, { @@ -45,8 +45,8 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest" } } ] @@ -64,8 +64,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "0.10" + "kind": "ImageStreamTag", + "name": "0.10" } }, { @@ -79,8 +79,8 @@ "sampleRepo": "https://github.com/openshift/nodejs-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest" } } ] @@ -98,8 +98,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.20" + "kind": "ImageStreamTag", + "name": "5.20" } }, { @@ -113,8 +113,8 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest" } }, { @@ -128,8 +128,8 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest" } } @@ -148,8 +148,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.6" + "kind": "ImageStreamTag", + "name": "5.6" } }, { @@ -163,8 +163,8 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest" } }, { @@ -178,8 +178,8 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest" + "kind": "DockerImage", + "name": 
"registry.access.redhat.com/rhscl/php-56-rhel7:latest" } } ] @@ -197,8 +197,8 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "3.4" + "kind": "ImageStreamTag", + "name": "3.4" } }, { @@ -212,8 +212,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest" } }, { @@ -227,8 +227,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest" } }, { @@ -242,8 +242,8 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest" } } ] @@ -261,22 +261,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "5.6" + "kind": "ImageStreamTag", + "name": "5.6" } }, { "name": "5.5", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest" } }, { "name": "5.6", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest" } } ] @@ -294,22 +294,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "9.4" + "kind": "ImageStreamTag", + "name": "9.4" } }, { "name": "9.2", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest" } }, { "name": "9.4", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest" } } ] @@ -327,22 +327,22 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "2.6" + "kind": "ImageStreamTag", + "name": "2.6" } }, { "name": "2.4", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest" } }, { "name": "2.6", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest" } } ] @@ -360,15 +360,15 @@ { "name": "latest", "from": { - "Kind": "ImageStreamTag", - "Name": "1" + "kind": "ImageStreamTag", + "name": "1" } }, { "name": "1", "from": { - "Kind": "DockerImage", - "Name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest" + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest" } } ] diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 9a5eebc66..a5731be09 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -8,7 +8,7 @@ - name: Import RHEL streams command: > {{ openshift.common.client_binary }} {{ 
openshift_examples_import_command }} -n openshift -f {{ rhel_image_streams }} - when: openshift_examples_load_rhel + when: openshift_examples_load_rhel | bool register: oex_import_rhel_streams failed_when: "'already exists' not in oex_import_rhel_streams.stderr and oex_import_rhel_streams.rc != 0" changed_when: false diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 57b50bee4..aa5e593b6 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -82,7 +82,7 @@ registry_selector: "{{ openshift_registry_selector | default(None) }}" api_server_args: "{{ osm_api_server_args | default(None) }}" controller_args: "{{ osm_controller_args | default(None) }}" - infra_nodes: "{{ num_infra | default(None) }}" + infra_nodes: "{{ openshift_infra_nodes | default(None) }}" disabled_features: "{{ osm_disabled_features | default(None) }}" master_count: "{{ openshift_master_count | default(None) }}" controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 0b42ca6d5..6c02b26bf 100644 --- a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -6,6 +6,7 @@ PartOf=docker.service [Service] ExecStartPre=-/usr/bin/docker rm -f openvswitch ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }} +ExecStartPost=/usr/bin/sleep 5 ExecStop=/usr/bin/docker stop openvswitch Restart=always diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml index 498a65127..355cbf84b 100644 --- a/roles/openshift_router/tasks/main.yml +++ b/roles/openshift_router/tasks/main.yml @@ -1,14 +1,9 @@ --- - -- set_fact: _ortr_images="--images='{{ openshift.master.registry_url }}'" - -- set_fact: _ortr_selector="--selector='{{ openshift.master.router_selector }}'" - - name: Deploy OpenShift Router command: > {{ openshift.common.admin_binary }} router - --create --replicas={{ openshift.master.infra_nodes }} - --service-account=router {{ _ortr_selector }} - --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images }} - register: _ortr_results - changed_when: "'service exists' not in _ortr_results.stdout" + --create --replicas={{ openshift.master.infra_nodes | length }} + --service-account=router {{ ortr_selector }} + --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ ortr_images }} + register: ortr_results + changed_when: "'service exists' not in ortr_results.stdout" diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml index 9967e26f4..bcac12068 100644 --- a/roles/openshift_router/vars/main.yml +++ b/roles/openshift_router/vars/main.yml @@ -1,2 +1,4 @@ --- openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +ortr_images: "--images='{{ openshift.master.registry_url }}'" +ortr_selector: "--selector='{{ openshift.master.router_selector }}'" diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index 12ea36c8b..9d20eb012 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ b/roles/os_zabbix/vars/template_openshift_master.yml @@ -20,13 +20,26 @@ 
g_template_openshift_master: - Openshift Master - key: openshift.master.api.ping - description: "Verify that the Openshift API is up" + description: "Verify that the Openshift API is up (uses the cluster API URL)" + type: int + applications: + - Openshift Master + + - key: openshift.master.local.api.ping + description: "Verify that the Openshift API is up on the host (uses the API URL as the https://127.0.0.1)" type: int applications: - Openshift Master - key: openshift.master.api.healthz - description: "Checks the healthz check of the master's api: https://master_host/healthz" + description: "Checks the healthz check of the master's api: https://<cluster_api_url>/healthz" + type: int + data_type: bool + applications: + - Openshift Master + + - key: openshift.master.local.api.healthz + description: "Checks the healthz check of the master's api: https://127.0.0.1/healthz" type: int data_type: bool applications: @@ -292,6 +305,11 @@ g_template_openshift_master: - name: 'Openshift Master API health check is failing on {HOST.NAME}' expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' + priority: high + + - name: 'Openshift Master Local API health check is failing on {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.local.api.healthz.max(#3)}<1' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' priority: high @@ -299,6 +317,11 @@ g_template_openshift_master: - name: 'Openshift Master API PING check is failing on {HOST.NAME}' expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' + priority: high + + - name: 'Openshift Master Local API PING check is failing on {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.local.api.ping.max(#3)}<1' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' priority: high diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml index ce28b1048..ff65ef158 100644 --- a/roles/os_zabbix/vars/template_openshift_node.yml +++ b/roles/os_zabbix/vars/template_openshift_node.yml @@ -20,6 +20,12 @@ g_template_openshift_node: applications: - Openshift Node + - key: openshift.node.ovs.stray.rules + description: Number of OVS stray rules found/removed + type: int + applications: + - Openshift Node + ztriggers: - name: 'Openshift Node process not running on {HOST.NAME}' expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1' diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml new file mode 100644 index 000000000..bbc3ad172 --- /dev/null +++ b/roles/rhel_subscribe/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - openshift_facts diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index eecfd04a0..85e17ff9d 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -41,4 +41,5 @@ command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} - include: enterprise.yml - when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] + when: deployment_type in [ 
'enterprise', 'atomic-enterprise', 'openshift-enterprise' ] and + not openshift.common.is_atomic | bool
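A hedged usage sketch, not part of the patch: the new oo_nodes_with_label filter added above can also be exercised as a plain Python function, which is a quick way to check the label-matching behaviour outside a playbook. This assumes filter_plugins/oo_filters.py is importable (its imports need the ansible package of this era on the path); the sample node variables below are hypothetical and mirror the names the filter's docstring documents.

from oo_filters import FilterModule  # filter_plugins/oo_filters.py from this patch

nodes = [
    {"openshift_node_labels": {"region": "infra", "zone": "default"}},
    {"openshift_node_labels": {"region": "primary", "zone": "east"}},
    {"openshift": {"node": {"labels": {"region": "infra"}}}},
]

# All nodes that define the 'region' label at all:
print(FilterModule.oo_nodes_with_label(nodes, "region"))

# Only the infra nodes, mirroring the lookup the master playbook now performs:
#   oo_nodes_with_label('region', 'infra') | oo_collect('inventory_hostname')
print(FilterModule.oo_nodes_with_label(nodes, "region", "infra"))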