From 8d80cf21e73a1015617c9ec8cd183602436e54fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Fri, 17 Jul 2015 16:46:13 +0200 Subject: Make the playbooks friendlier with SELinux disabled hosts --- roles/openshift_node/tasks/main.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e18846db8..5188df973 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -63,11 +63,13 @@ lineinfile: dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*' - line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} --selinux-enabled'" + line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ +{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'" when: docker_check.stat.isreg - name: Allow NFS access for VMs seboolean: name=virt_use_nfs state=yes persistent=yes + when: ansible_selinux and ansible_selinux.status == "enabled" - name: Start and enable openshift-node service: name=openshift-node enabled=yes state=started -- cgit v1.2.3 From 8f951b8d458c9ec58614a65e4dad4f3ce2b74293 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pep=20Turr=C3=B3=20Mauri?= Date: Thu, 16 Jul 2015 16:46:33 +0200 Subject: Reference deployment_type instead of hardcoding origin --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index a832ae84e..f0aa0387b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -56,7 +56,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| "nodes" => ["node1", "node2"], } ansible.extra_vars = { - openshift_deployment_type: "origin", + openshift_deployment_type: deployment_type, } ansible.playbook = "playbooks/byo/config.yml" end -- cgit v1.2.3 From 5d7753a8ecb03634f045b057dc33369178615f92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pep=20Turr=C3=B3=20Mauri?= Date: Sat, 1 Aug 2015 17:22:24 +0200 Subject: Deploying enterprise with Vagrant --- README_vagrant.md | 28 ++++++++++++++++++++++++++-- Vagrantfile | 39 +++++++++++++++++++++++++++++++++------ playbooks/byo/vagrant.yml | 14 ++++++++++++++ 3 files changed, 73 insertions(+), 8 deletions(-) create mode 100644 playbooks/byo/vagrant.yml diff --git a/README_vagrant.md b/README_vagrant.md index 26ec52c0a..5f87d6633 100644 --- a/README_vagrant.md +++ b/README_vagrant.md @@ -2,9 +2,28 @@ Requirements ------------ - vagrant (tested against version 1.7.2) - vagrant-hostmanager plugin (tested against version 1.5.0) +- vagrant-registration plugin (only required for enterprise deployment type) - vagrant-libvirt (tested against version 0.0.26) - Only required if using libvirt instead of virtualbox +For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant: + +1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads) + +2. Install it into vagrant + + ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box`` + +3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt) + + Resize the actual qcow2 image: + + ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB`` + + Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. 
A corrected metadata.json looks like this: + + ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}`` + Usage ----- ``` @@ -21,5 +40,10 @@ vagrant provision Environment Variables --------------------- The following environment variables can be overriden: -- OPENSHIFT_DEPLOYMENT_TYPE (defaults to origin, choices: origin, enterprise, online) -- OPENSHIFT_NUM_NODES (the number of nodes to create, defaults to 2) +- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online) +- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) + +For ``enterprise`` deployment types these env variables should also be specified: +- ``rhel_subscription_user``: rhsm user +- ``rhel_subscription_pass``: rhsm password +- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects diff --git a/Vagrantfile b/Vagrantfile index f0aa0387b..20cf0b5bd 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -15,6 +15,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.hostmanager.manage_host = true config.hostmanager.include_offline = true config.ssh.insert_key = false + + if deployment_type === 'enterprise' + unless Vagrant.has_plugin?('vagrant-registration') + raise 'vagrant-registration-plugin is required for enterprise deployment' + end + username = ENV['rhel_subscription_user'] + password = ENV['rhel_subscription_pass'] + unless username and password + raise 'rhel_subscription_user and rhel_subscription_pass are required' + end + config.registration.username = username + config.registration.password = password + # FIXME this is temporary until vagrant/ansible registration modules + # are capable of handling specific subscription pools + if not ENV['rhel_subscription_pool'].nil? 
+ config.vm.provision "shell" do |s| + s.inline = "subscription-manager attach --pool=$1 || true" + s.args = "#{ENV['rhel_subscription_pool']}" + end + end + end + config.vm.provider "virtualbox" do |vbox, override| override.vm.box = "chef/centos-7.1" vbox.memory = 1024 @@ -28,10 +50,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| libvirt.cpus = 2 libvirt.memory = 1024 libvirt.driver = 'kvm' - override.vm.box = "centos-7.1" - override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box" - override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" - override.vm.box_download_checksum_type = "sha256" + case deployment_type + when "enterprise" + override.vm.box = "rhel-7" + when "origin" + override.vm.box = "centos-7.1" + override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box" + override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" + override.vm.box_download_checksum_type = "sha256" + end end num_nodes.times do |n| @@ -53,12 +80,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ansible.sudo = true ansible.groups = { "masters" => ["master"], - "nodes" => ["node1", "node2"], + "nodes" => ["master", "node1", "node2"], } ansible.extra_vars = { openshift_deployment_type: deployment_type, } - ansible.playbook = "playbooks/byo/config.yml" + ansible.playbook = "playbooks/byo/vagrant.yml" end end end diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml new file mode 100644 index 000000000..c89f8775b --- /dev/null +++ b/playbooks/byo/vagrant.yml @@ -0,0 +1,14 @@ +--- +- hosts: all + vars: + deployment_type: "{{ openshift_deployment_type }}" + roles: + - role: rhel_subscribe + when: openshift_deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - openshift_repos + - os_update_latest + +- include: config.yml -- cgit v1.2.3 From 3548472edd08d09fafcb236790a44bcf31aa5f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pep=20Turr=C3=B3=20Mauri?= Date: Mon, 3 Aug 2015 16:29:25 +0200 Subject: Move rhel_subscribe tasks to its own playbook Allows reuse out of vagrant, e.g. 
to subscribe systems by its own --- playbooks/byo/rhel_subscribe.yml | 12 ++++++++++++ playbooks/byo/vagrant.yml | 12 +----------- 2 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 playbooks/byo/rhel_subscribe.yml diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml new file mode 100644 index 000000000..60300c3dc --- /dev/null +++ b/playbooks/byo/rhel_subscribe.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + vars: + deployment_type: "{{ openshift_deployment_type }}" + roles: + - role: rhel_subscribe + when: openshift_deployment_type == "enterprise" and + ansible_distribution == "RedHat" and + lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | + default('no', True) | lower in ['no', 'false'] + - openshift_repos + - os_update_latest diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml index c89f8775b..76246e7b0 100644 --- a/playbooks/byo/vagrant.yml +++ b/playbooks/byo/vagrant.yml @@ -1,14 +1,4 @@ --- -- hosts: all - vars: - deployment_type: "{{ openshift_deployment_type }}" - roles: - - role: rhel_subscribe - when: openshift_deployment_type == "enterprise" and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest +- include: rhel_subscribe.yml - include: config.yml -- cgit v1.2.3 From e438f0c19e86241e11853970aa7e94e90c5fffeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pep=20Turr=C3=B3=20Mauri?= Date: Mon, 3 Aug 2015 16:40:06 +0200 Subject: Use deployment_type, not openshift_deployment_type This seems to be what's used in other places --- Vagrantfile | 2 +- playbooks/byo/rhel_subscribe.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 20cf0b5bd..4675b5d60 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -83,7 +83,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| "nodes" => ["master", "node1", "node2"], } ansible.extra_vars = { - openshift_deployment_type: deployment_type, + deployment_type: deployment_type, } ansible.playbook = "playbooks/byo/vagrant.yml" end diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 60300c3dc..f564905ea 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,10 +1,10 @@ --- - hosts: all vars: - deployment_type: "{{ openshift_deployment_type }}" + openshift_deployment_type: "{{ deployment_type }}" roles: - role: rhel_subscribe - when: openshift_deployment_type == "enterprise" and + when: deployment_type == "enterprise" and ansible_distribution == "RedHat" and lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false'] -- cgit v1.2.3 From b497f7a2a77c3ef1434a5f0bd11fccf9d81b44aa Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Tue, 4 Aug 2015 11:10:35 -0300 Subject: Fix node labeling. 
Issue #305 --- filter_plugins/oo_filters.py | 11 +++++++++++ playbooks/common/openshift-node/config.yml | 5 ++++- roles/openshift_manage_node/tasks/main.yml | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 47033a88e..9c263f0dd 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -130,6 +130,16 @@ class FilterModule(object): rval.append("%s%s%s" % (item['key'], joiner, item['value'])) return rval + + @staticmethod + def oo_combine_dict(data, in_joiner='=', out_joiner=' '): + '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and + arrange them as a string 'key=value key=value' + ''' + if not issubclass(type(data), dict): + raise errors.AnsibleFilterError("|failed expects first param is a dict") + + return out_joiner.join([ in_joiner.join([k, v]) for k, v in data.items() ]) @staticmethod def oo_ami_selector(data, image_name): @@ -309,6 +319,7 @@ class FilterModule(object): "oo_ami_selector": self.oo_ami_selector, "oo_ec2_volume_definition": self.oo_ec2_volume_definition, "oo_combine_key_value": self.oo_combine_key_value, + "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6ef375bbb..122cfbf92 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -126,9 +126,12 @@ - name: Set scheduleability hosts: oo_first_master vars: + openshift_node_labels: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('openshift.node.labels') }}" openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.common.hostname') }}" + | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" pre_tasks: diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index d17f3f532..e64d6e713 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -16,3 +16,10 @@ command: > {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true with_items: openshift_scheduleable_nodes + +- name: Tag schedulable nodes + command: > + {{ openshift.common.client_binary }} label --overwrite node {{ item.0 }} {{ item.1 | oo_combine_dict }} + with_nested: + - openshift_scheduleable_nodes + - openshift_node_labels \ No newline at end of file -- cgit v1.2.3 From 6be237602331e88a330c1f46d31aeb97d9af1aa2 Mon Sep 17 00:00:00 2001 From: "Diego Castro (dscastro)" Date: Tue, 4 Aug 2015 11:53:07 -0300 Subject: Applying changes suggested by @sdodson --- roles/openshift_manage_node/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index e64d6e713..257bcee2c 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -17,9 +17,9 @@ {{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true with_items: openshift_scheduleable_nodes -- name: Tag schedulable nodes +- name: Label nodes command: > {{ 
openshift.common.client_binary }} label --overwrite node {{ item.0 }} {{ item.1 | oo_combine_dict }} with_nested: - - openshift_scheduleable_nodes - - openshift_node_labels \ No newline at end of file + - openshift_nodes + - openshift_node_labels -- cgit v1.2.3 From 598d9355d0ae4088f6afb11a1a12821efd0939f5 Mon Sep 17 00:00:00 2001 From: John T Skarbek Date: Wed, 5 Aug 2015 20:33:33 -0400 Subject: Removes hardcoded python2 * replaces the hard coded items in favor of pulling a users environment * resolves #383 * Feedback and/or additional testing is more than welcome --- inventory/aws/hosts/hosts | 2 +- inventory/gce/hosts/hosts | 2 +- inventory/libvirt/hosts/hosts | 2 +- inventory/openstack/hosts/hosts | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts index 34a4396bd..bf4e0845a 100644 --- a/inventory/aws/hosts/hosts +++ b/inventory/aws/hosts/hosts @@ -1 +1 @@ -localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 +localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts index 34a4396bd..bf4e0845a 100644 --- a/inventory/gce/hosts/hosts +++ b/inventory/gce/hosts/hosts @@ -1 +1 @@ -localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 +localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts index 34a4396bd..bf4e0845a 100644 --- a/inventory/libvirt/hosts/hosts +++ b/inventory/libvirt/hosts/hosts @@ -1 +1 @@ -localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 +localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts index 9cdc31449..2d2194a4d 100644 --- a/inventory/openstack/hosts/hosts +++ b/inventory/openstack/hosts/hosts @@ -1 +1 @@ -localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local +localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local -- cgit v1.2.3 From c17efa0172a11f79cb28d3c5740b7c16ed70c3b8 Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Sun, 9 Aug 2015 12:40:28 -0300 Subject: Fix node labels --- playbooks/common/openshift-node/config.yml | 3 --- roles/openshift_manage_node/tasks/main.yml | 7 +++---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 122cfbf92..4010b4c9e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -126,9 +126,6 @@ - name: Set scheduleability hosts: oo_first_master vars: - openshift_node_labels: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.node.labels') }}" openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) | oo_collect('openshift.common.hostname') }}" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 257bcee2c..c488af723 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -19,7 +19,6 @@ - name: Label nodes command: > - {{ openshift.common.client_binary }} label --overwrite node {{ item.0 }} {{ item.1 | 
oo_combine_dict }} - with_nested: - - openshift_nodes - - openshift_node_labels + {{ openshift.common.client_binary }} label --overwrite node {{ item }} {{ hostvars[item]['openshift_node_labels'] | oo_combine_dict }} + with_items: + - "{{ openshift_nodes }}" -- cgit v1.2.3 From 9a41d23f553ab638af22f54fbd30c4de3b0eae18 Mon Sep 17 00:00:00 2001 From: Diego Castro Date: Mon, 10 Aug 2015 11:50:57 -0300 Subject: Don't try to label node if there's no labels --- roles/openshift_manage_node/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index c488af723..472d63efe 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -22,3 +22,5 @@ {{ openshift.common.client_binary }} label --overwrite node {{ item }} {{ hostvars[item]['openshift_node_labels'] | oo_combine_dict }} with_items: - "{{ openshift_nodes }}" + when: + "'openshift_node_labels' in hostvars[item]" -- cgit v1.2.3 From 7a12b210856a2abefb2a7d70448975c7f2ce14af Mon Sep 17 00:00:00 2001 From: "Diego Castro (dscastro)" Date: Wed, 12 Aug 2015 11:44:34 -0300 Subject: Notes for schedule behavior. --- roles/openshift_node/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 5edb3b8dd..300e6b495 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -34,6 +34,18 @@ openshift_common Example Playbook ---------------- +Notes +----- + +Currently we support re-labeling nodes but we don't re-schedule running pods nor remove existing labels. That means you will have to trigger the re-schedulling manually. To re-schedule your pods, just follow the steps below: + +``` +oadm manage-node --schedulable=false ${NODE} +oadm manage-node --evacuate ${NODE} +oadm manage-node --schedulable=true ${NODE} +```` + + TODO License -- cgit v1.2.3 From dcdb74b61cd49bee70da5997b9990da86cc3b1c8 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 12 Aug 2015 09:33:02 -0400 Subject: Add support for setting default node selector --- inventory/byo/hosts.example | 3 +++ roles/openshift_master/tasks/main.yml | 1 + roles/openshift_master/templates/master.yaml.v1.j2 | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 629956d0e..dd5c02b77 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -60,6 +60,9 @@ deployment_type=enterprise # additional cors origins #osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] +# default project node selector +#osm_default_node_selector='region=primary' + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 151d0662f..f90f526c9 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -55,6 +55,7 @@ sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}" default_subdomain: "{{ osm_default_subdomain | default(None) }}" custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}" + default_node_selector: "{{ osm_default_node_selector | default(None) }}" # TODO: These values need to be configurable - name: Set dns OpenShift facts diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index c4d319c87..b5e3d2f05 100644 --- 
a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -95,7 +95,7 @@ policyConfig: openshiftSharedResourcesNamespace: openshift {# TODO: Allow users to override projectConfig items #} projectConfig: - defaultNodeSelector: "" + defaultNodeSelector: "{{ openshift.master.default_node_selector | default("") }}" projectRequestMessage: "" projectRequestTemplate: "" securityAllocator: -- cgit v1.2.3 From db0078cd9631d841bef5c176aed18a7907871d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Wed, 12 Aug 2015 15:26:12 +0200 Subject: Force SELinux on "enterprise" deployment type --- roles/openshift_node/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 5188df973..f9c3d10e9 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -6,6 +6,9 @@ - fail: msg: This role requres that osn_cluster_dns_ip is set when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip +- fail: + msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." + when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online'] - name: Install OpenShift Node package yum: pkg=openshift-node state=present -- cgit v1.2.3 From e59348bc7e8ece270be2fa75954e765c0eb554d7 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Fri, 31 Jul 2015 12:27:13 -0400 Subject: Enable htpasswd by default in the example hosts file. --- inventory/byo/hosts.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 629956d0e..f3bdb5fad 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -33,7 +33,7 @@ deployment_type=enterprise #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] # htpasswd auth -#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}] +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}] # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] -- cgit v1.2.3 From 800256e451b112d3cd7c2356615572c0de3c3840 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 12 Aug 2015 22:01:06 -0400 Subject: Add support for setting kubeletArguments, controllerArguments, and apiServerArguments --- roles/openshift_master/tasks/main.yml | 2 ++ roles/openshift_master/templates/master.yaml.v1.j2 | 6 ++++++ roles/openshift_node/tasks/main.yml | 1 + roles/openshift_node/templates/node.yaml.v1.j2 | 3 +++ 4 files changed, 12 insertions(+) diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index f90f526c9..3ee21b902 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -56,6 +56,8 @@ default_subdomain: "{{ osm_default_subdomain | default(None) }}" 
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}" default_node_selector: "{{ osm_default_node_selector | default(None) }}" + api_server_args: "{{ osm_api_server_args | default(None) }}" + controller_args: "{{ osm_controller_args | default(None) }}" # TODO: These values need to be configurable - name: Set dns OpenShift facts diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index b5e3d2f05..44567aa22 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -2,6 +2,9 @@ apiLevels: - v1beta3 - v1 apiVersion: v1 +{% if api_server_args is defined and api_server_args %} +apiServerArguments: {{ api_server_args }} +{% endif %} assetConfig: logoutURL: "" masterPublicURL: {{ openshift.master.public_api_url }} @@ -13,6 +16,9 @@ assetConfig: keyFile: master.server.key maxRequestsInFlight: 0 requestTimeoutSeconds: 0 +{% if controller_args is defined and controller_args %} +controllerArguments: {{ controller_args }} +{% endif %} corsAllowedOrigins: {% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %} - {{ origin }} diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e84e74b40..3225645a2 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -33,6 +33,7 @@ registry_url: "{{ oreg_url | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" portal_net: "{{ openshift_master_portal_net | default(None) }}" + kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 7778a2a61..e6f75a4c0 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -8,6 +8,9 @@ imageConfig: format: {{ openshift.node.registry_url }} latest: false kind: NodeConfig +{% if openshift.common.kubelet_args is defined and openshift.common.kubelet_args %} +kubeletArguments: {{ kubelet_args }} +{% endif %} masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig networkPluginName: {{ openshift.common.sdn_network_plugin_name }} nodeName: {{ openshift.common.hostname }} -- cgit v1.2.3 From 6248101e89cb2feb63692b7ff396d2cf4f6466fb Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 13 Aug 2015 11:15:16 -0700 Subject: pause for a minimum of 15 seconds --- roles/fluentd_master/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index d64900eb0..9c21c06fe 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -40,7 +40,7 @@ mode: 0444 - name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes." 
- pause: seconds={{ num_nodes|int * 5 }} + pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes * 5)) }} - name: ensure td-agent is running service: -- cgit v1.2.3 From 089d368d2976818d08c656f45711e25e70cf7a35 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 13 Aug 2015 12:14:02 -0700 Subject: make sure that number is an int --- roles/fluentd_master/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index 9c21c06fe..d592dc306 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -40,7 +40,7 @@ mode: 0444 - name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes." - pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes * 5)) }} + pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }} - name: ensure td-agent is running service: -- cgit v1.2.3 From a5a75ed355b02d6729c492ac14091a6c8ff29514 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 13 Aug 2015 15:47:24 -0400 Subject: Require etcd 2.* Fixes #422 When etcd-2.1 is available in RHEL7 / Centos 7 we'll bumpt to that as it's considerably more stable with regard to WAL corruption and recovery. --- roles/etcd/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 79a91dfde..27bfb7de9 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Install etcd - yum: pkg=etcd state=present + yum: pkg=etcd-2.* state=present - name: Validate permissions on the config dir file: -- cgit v1.2.3 From 000e179cb3d39756d9bf5f846e2be3a7d3759f5f Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Wed, 12 Aug 2015 16:19:25 -0400 Subject: Changes to make documentation less specific to OSE or AE and also adds README_AEP.md. --- README.md | 7 +- README_AEP.md | 240 +++++++++++++++++++++++++++++++++++++++ docs/best_practices_guide.adoc | 2 +- roles/openshift_common/README.md | 6 +- 4 files changed, 248 insertions(+), 7 deletions(-) create mode 100644 README_AEP.md diff --git a/README.md b/README.md index 2bdaefd4c..7544e8e2a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -#openshift-ansible +#Openshift and Atomic Enterprise Ansible -This repo contains OpenShift Ansible code. +This repo contains Ansible code for Openshift and Atomic Enterprise. ##Setup - Install base dependencies: @@ -23,12 +23,13 @@ This repo contains OpenShift Ansible code. 
- Bring your own host deployments: - [OpenShift Enterprise](README_OSE.md) - [OpenShift Origin](README_origin.md) + - [Atomic Enterprise](README_AEP.md) - Build - [How to build the openshift-ansible rpms](BUILD.md) - Directory Structure: - - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters + - [bin/cluster](bin/cluster) - python script to easily create clusters - [docs](docs) - Documentation for the project - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible - [inventory/](inventory) - houses Ansible dynamic inventory scripts diff --git a/README_AEP.md b/README_AEP.md new file mode 100644 index 000000000..e29888617 --- /dev/null +++ b/README_AEP.md @@ -0,0 +1,240 @@ +# Installing AEP from dev puddles using ansible + +* [Requirements](#requirements) +* [Caveats](#caveats) +* [Known Issues](#known-issues) +* [Configuring the host inventory](#configuring-the-host-inventory) +* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups) +* [Running the ansible playbooks](#running-the-ansible-playbooks) +* [Post-ansible steps](#post-ansible-steps) +* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames) + +## Requirements +* ansible + * Tested using ansible 1.9.1 and 1.9.2 + * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842 + * Available in Fedora channels + * Available for EL with EPEL and Optional channel +* One or more RHEL 7.1 VMs +* Either ssh key based auth for the root user or ssh key based auth for a user + with sudo access (no password) +* A checkout of atomic-enterprise-ansible from https://github.com/projectatomic/atomic-enterprise-ansible/ + + ```sh + git clone https://github.com/projectatomic/atomic-enterprise-ansible.git + cd atomic-enterprise-ansible + ``` + +## Caveats +This ansible repo is currently under heavy revision for providing OSE support; +the following items are highly likely to change before the OSE support is +merged into the upstream repo: + * the current git branch for testing + * how the inventory file should be configured + * variables that need to be set + * bootstrapping steps + * other configuration steps + +## Known Issues +* Host subscriptions are not configurable yet, the hosts need to be + pre-registered with subscription-manager or have the RHEL base repo + pre-configured. If using subscription-manager the following commands will + disable all but the rhel-7-server rhel-7-server-extras and + rhel-server7-ose-beta repos: +```sh +subscription-manager repos --disable="*" +subscription-manager repos \ +--enable="rhel-7-server-rpms" \ +--enable="rhel-7-server-extras-rpms" \ +--enable="rhel-7-server-ose-3.0-rpms" +``` +* Configuration of router is not automated yet +* Configuration of docker-registry is not automated yet + +## Configuring the host inventory +[Ansible docs](http://docs.ansible.com/intro_inventory.html) + +Example inventory file for configuring one master and two nodes for the test +environment. This can be configured in the default inventory file +(/etc/ansible/hosts), or using a custom file and passing the --inventory +option to ansible-playbook. 
+ +/etc/ansible/hosts: +```ini +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a password +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true +#ansible_sudo=true + +# To deploy origin, change deployment_type to origin +deployment_type=enterprise + +# Pre-release registry URL +oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version} + +# Pre-release additional repo +openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', +'baseurl': +'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', +'enabled': 1, 'gpgcheck': 0}] + +# Origin copr repo +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': +'OpenShift Origin COPR', 'baseurl': +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', +'enabled': 1, 'gpgcheck': 1, gpgkey: +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] + +# host group for masters +[masters] +ose3-master.example.com + +# host group for nodes +[nodes] +ose3-node[1:2].example.com +``` + +The hostnames above should resolve both from the hosts themselves and +the host where ansible is running (if different). + +## Running the ansible playbooks +From the atomic-enterprise-ansible checkout run: +```sh +ansible-playbook playbooks/byo/config.yml +``` +**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different +inventory file use the -i option for ansible-playbook. + +## Post-ansible steps +#### Create the default router +On the master host: +```sh +oadm router --create=true \ + --credentials=/etc/openshift/master/openshift-router.kubeconfig \ + --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' +``` + +#### Create the default docker-registry +On the master host: +```sh +oadm registry --create=true \ + --credentials=/etc/openshift/master/openshift-registry.kubeconfig \ + --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \ + --mount-host=/var/lib/openshift/docker-registry +``` + +## Overriding detected ip addresses and hostnames +Some deployments will require that the user override the detected hostnames +and ip addresses for the hosts. To see what the default values will be you can +run the openshift_facts playbook: +```sh +ansible-playbook playbooks/byo/openshift_facts.yml +``` +The output will be similar to: +``` +ok: [10.3.9.45] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "ip": "172.16.4.79", + "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "public_ip": "10.3.9.45", + "use_openshift_sdn": true + }, + "provider": { + ... ... 
+ } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +ok: [10.3.9.42] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "ip": "172.16.4.75", + "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "public_ip": "10.3.9.42", + "use_openshift_sdn": true + }, + "provider": { + ...... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +ok: [10.3.9.36] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "ip": "172.16.4.73", + "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "public_ip": "10.3.9.36", + "use_openshift_sdn": true + }, + "provider": { + ...... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +``` +Now, we want to verify the detected common settings to verify that they are +what we expect them to be (if not, we can override them). + +* hostname + * Should resolve to the internal ip from the instances themselves. + * openshift_hostname will override. +* ip + * Should be the internal ip of the instance. + * openshift_ip will override. +* public hostname + * Should resolve to the external ip from hosts outside of the cloud + * provider openshift_public_hostname will override. +* public_ip + * Should be the externally accessible ip associated with the instance + * openshift_public_ip will override +* use_openshift_sdn + * Should be true unless the cloud is GCE. + * openshift_use_openshift_sdn overrides + +To override the the defaults, you can set the variables in your inventory: +``` +...snip... +[masters] +ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com +...snip... +``` diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index a146b93ad..4b7d7c43d 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -421,7 +421,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used. .Examples: -* The role to configure an OpenShift Master is called `openshift_master` +* The role to configure a Master is called `openshift_master` * The role to configure OpenShift specific yum repositories is called `openshift_repos` === Filters diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index eb4ef26e8..1eb04626f 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -1,7 +1,7 @@ -OpenShift Common -================ +OpenShift/Atomic Enterprise Common +=================================== -OpenShift common installation and configuration tasks. +OpenShift/Atomic Enterprise common installation and configuration tasks. Requirements ------------ -- cgit v1.2.3 From abd6132a81ed7b9e7931af1271db9067e9b51536 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Thu, 13 Aug 2015 18:32:19 -0400 Subject: Changed the string Master to master to make it more readable. 
--- docs/best_practices_guide.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 4b7d7c43d..08d95b2b8 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -421,7 +421,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used. .Examples: -* The role to configure a Master is called `openshift_master` +* The role to configure a master is called `openshift_master` * The role to configure OpenShift specific yum repositories is called `openshift_repos` === Filters -- cgit v1.2.3 From 65f9922028595c36eb10c8f43b4db51817d64c32 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 13 Aug 2015 16:15:44 -0400 Subject: Fix for node labeling where internal node name != inventory_hostname --- playbooks/common/openshift-node/config.yml | 3 ++- roles/openshift_manage_node/tasks/main.yml | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 4010b4c9e..705f7f223 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -128,9 +128,10 @@ vars: openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('openshift.common.hostname') }}" + | oo_collect('openshift.common.hostname') }}" openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([])) | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}" + openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - set_fact: openshift_scheduleable_nodes: "{{ hostvars diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 472d63efe..cbf1c667f 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -19,8 +19,7 @@ - name: Label nodes command: > - {{ openshift.common.client_binary }} label --overwrite node {{ item }} {{ hostvars[item]['openshift_node_labels'] | oo_combine_dict }} + {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }} with_items: - - "{{ openshift_nodes }}" - when: - "'openshift_node_labels' in hostvars[item]" + - "{{ openshift_node_vars }}" + when: "'labels' in item.openshift.node" -- cgit v1.2.3 From 7c45c23b6769779393e2bdf1d17f9e605a55d300 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Fri, 14 Aug 2015 09:10:18 +0200 Subject: Fix pylint errors on oo_filters.py --- filter_plugins/oo_filters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 9c263f0dd..c3408702d 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -130,7 +130,7 @@ class FilterModule(object): rval.append("%s%s%s" % (item['key'], joiner, item['value'])) return rval - + @staticmethod def oo_combine_dict(data, in_joiner='=', out_joiner=' '): '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and @@ -139,7 +139,7 @@ class FilterModule(object): if not issubclass(type(data), dict): raise errors.AnsibleFilterError("|failed expects first param is a 
dict") - return out_joiner.join([ in_joiner.join([k, v]) for k, v in data.items() ]) + return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()]) @staticmethod def oo_ami_selector(data, image_name): -- cgit v1.2.3 From a82ae49cdf7b44b74da9f19cac313f496cfe4e04 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Fri, 14 Aug 2015 14:47:15 -0400 Subject: Skip node label if labels are empty --- roles/openshift_manage_node/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index cbf1c667f..74e702248 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -22,4 +22,4 @@ {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }} with_items: - "{{ openshift_node_vars }}" - when: "'labels' in item.openshift.node" + when: "'labels' in item.openshift.node and item.openshift.node.labels != {}" -- cgit v1.2.3 From 29f4037106ac8ada0955f5c1f309b5de3e0e94ea Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Fri, 14 Aug 2015 16:09:51 -0400 Subject: Update instance sizes for online --- playbooks/aws/openshift-cluster/vars.online.int.yml | 4 ++-- playbooks/aws/openshift-cluster/vars.online.prod.yml | 4 ++-- playbooks/aws/openshift-cluster/vars.online.stage.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml index b9ee29b83..bb18e13b0 100644 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'integration', 'integration-master' ] -ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'integration', 'integration-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'integration', 'integration-node' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml index 691582834..bbef9cc56 100644 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'production', 'production-master' ] -ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'production', 'production-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'production', 'production-node' ] diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml index 2ec43ad4c..9008a55ba 100644 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -3,9 +3,9 @@ ec2_image: ami-9101c8fa ec2_image_name: libra-ops-rhel7* ec2_region: us-east-1 ec2_keypair: mmcgrath_libra -ec2_master_instance_type: m4.large +ec2_master_instance_type: t2.small ec2_master_security_groups: [ 'stage', 'stage-master' ] 
-ec2_infra_instance_type: m4.large +ec2_infra_instance_type: c4.large ec2_infra_security_groups: [ 'stage', 'stage-infra' ] ec2_node_instance_type: m4.large ec2_node_security_groups: [ 'stage', 'stage-node' ] -- cgit v1.2.3
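
For reference, below is a minimal standalone sketch of what the `oo_combine_dict` filter introduced in the node-labeling patches above produces, and how its output feeds the label command. This is not part of the patch series: the sample labels and node name are illustrative only, and a plain `TypeError` stands in for Ansible's `AnsibleFilterError`.

```python
# Sketch of the oo_combine_dict filter from filter_plugins/oo_filters.py:
# it renders a labels dict as the space-separated key=value arguments that
# "{{ openshift.common.client_binary }} label --overwrite node <hostname> ..."
# receives in roles/openshift_manage_node/tasks/main.yml.

def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
    """Turn {'key': 'value', ...} into 'key=value key=value'."""
    if not isinstance(data, dict):
        # the real filter raises errors.AnsibleFilterError here
        raise TypeError("expects first param is a dict")
    return out_joiner.join(in_joiner.join([k, v]) for k, v in data.items())

# Hypothetical node labels, as they might appear in openshift_node_labels
labels = {'region': 'primary', 'zone': 'default'}
print(oo_combine_dict(labels))
# -> region=primary zone=default
```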