-rw-r--r-- | README_libvirt.md | 8
-rw-r--r-- | filter_plugins/oo_filters.py | 8
-rw-r--r-- | inventory/byo/hosts.byo.glusterfs.external.example | 10
-rw-r--r-- | inventory/byo/hosts.byo.glusterfs.mixed.example | 10
-rw-r--r-- | inventory/byo/hosts.byo.glusterfs.native.example | 10
-rw-r--r-- | inventory/byo/hosts.byo.glusterfs.registry-only.example | 10
-rw-r--r-- | inventory/byo/hosts.byo.glusterfs.storage-and-registry.example | 16
-rw-r--r-- | inventory/byo/hosts.origin.example | 8
-rw-r--r-- | inventory/byo/hosts.ose.example | 8
-rw-r--r-- | playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 33
-rw-r--r-- | playbooks/common/openshift-master/config.yml | 3
-rw-r--r-- | playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 10
-rw-r--r-- | roles/ansible_service_broker/vars/openshift-enterprise.yml | 2
-rw-r--r-- | roles/openshift_default_storage_class/defaults/main.yml | 4
-rw-r--r-- | roles/openshift_excluder/tasks/unexclude.yml | 4
-rwxr-xr-x | roles/openshift_facts/library/openshift_facts.py | 7
-rw-r--r-- | roles/openshift_master/tasks/main.yml | 6
17 files changed, 108 insertions(+), 49 deletions(-)
diff --git a/README_libvirt.md b/README_libvirt.md
index c523d83fb..1661681a0 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -15,7 +15,7 @@ Install dependencies
 3. Install [ebtables](http://ebtables.netfilter.org/)
 4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
 5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/)
+6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
 7. Enable and start the libvirt daemon, e.g:
    - `systemctl enable libvirtd`
    - `systemctl start libvirtd`
@@ -23,6 +23,7 @@ Install dependencies
 9. Check that your `$HOME` is accessible to the qemu user²
 10. Configure dns resolution on the host³
 11. Install libselinux-python
+12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴

 #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.

@@ -103,6 +104,11 @@ sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
 server=/example.com/192.168.55.1
 ```

+#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
+
+This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
+
+
 Test The Setup
 --------------
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c6d0e69eb..36a90a870 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -194,10 +194,10 @@ def oo_select_keys_from_list(data, keys):
     """
     if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|failed expects to filter on a list")
+        raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects to filter on a list")

     if not isinstance(keys, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
+        raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects first param is a list")

     # Gather up the values for the list of keys passed in
     retval = [oo_select_keys(item, keys) for item in data]
@@ -213,10 +213,10 @@ def oo_select_keys(data, keys):
     """
     if not isinstance(data, Mapping):
-        raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+        raise errors.AnsibleFilterError("|oo_select_keys failed expects to filter on a dict or object")

     if not isinstance(keys, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
+        raise errors.AnsibleFilterError("|oo_select_keys failed expects first param is a list")

     # Gather up the values for the list of keys passed in
     retval = [data[key] for key in keys if key in data]
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
index 628d3a3f7..5a284ce97 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -31,13 +31,13 @@ openshift_storage_glusterfs_is_native=False
 openshift_storage_glusterfs_heketi_url=172.0.0.1

 [masters]
-master node=True storage=True master=True
+master

 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True

 # Specify the glusterfs group, which contains the nodes of the external
 # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
index fd47cb9d5..d16df6470 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -34,13 +34,13 @@ openshift_storage_glusterfs_heketi_is_native=True
 openshift_storage_glusterfs_heketi_executor=ssh
 openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa

 [masters]
-master node=True storage=True master=True
+master

 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True

 # Specify the glusterfs group, which contains the nodes of the external
 # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname"
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
index a3e2570c9..c1a1f6f84 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -24,15 +24,15 @@ ansible_ssh_user=root
 openshift_deployment_type=origin

 [masters]
-master node=True storage=True master=True
+master

 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True

 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index 999518abe..31a85ee42 100644
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -30,15 +30,15 @@ openshift_deployment_type=origin
 openshift_hosted_registry_storage_kind=glusterfs

 [masters]
-master node=True storage=True master=True
+master

 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
-node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True

 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
index 1df79301a..54bd89ddc 100644
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -31,20 +31,20 @@ openshift_deployment_type=origin
 openshift_hosted_registry_storage_kind=glusterfs

 [masters]
-master node=True storage=True master=True
+master

 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
-node3 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True

 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index ff2bdb0c5..de7493f71 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -883,6 +883,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
 # where as this would not
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place and if they fail they will fail the upgrade
+# You may wish to disable these or make them non fatal
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal==true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal==false

 # host group for masters
 [masters]
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 68639eebf..62a364e0d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -879,6 +879,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
 # where as this would not
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place and if they fail they will fail the upgrade
+# You may wish to disable these or make them non fatal
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal==true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal==false

 # host group for masters
 [masters]
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 2b2f10aee..695dc3140 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -12,6 +12,12 @@
     command: >
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
+    register: l_pb_upgrade_control_plane_pre_upgrade_storage
+    when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+    failed_when:
+    - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+    - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool

 # If facts cache were for some reason deleted, this fact may not be set, and if not set
 # it will always default to true. This causes problems for the etcd data dir fact detection
@@ -140,16 +146,21 @@
   - include: "{{ openshift_master_upgrade_post_hook }}"
     when: openshift_master_upgrade_post_hook is defined

-  - set_fact:
-      master_update_complete: True
-
-- name: Post master upgrade - Upgrade clusterpolicies storage
-  hosts: oo_first_master
-  tasks:
-  - name: Upgrade clusterpolicies storage
+  - name: Post master upgrade - Upgrade clusterpolicies storage
     command: >
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
+    register: l_pb_upgrade_control_plane_post_upgrade_storage
+    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+    failed_when:
+    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+    run_once: true
+    delegate_to: oo_first_master
+
+  - set_fact:
+      master_update_complete: True

 ##############################################################################
 # Gate on master update complete
@@ -230,11 +241,17 @@
     - reconcile_scc_result.rc == 0
     run_once: true

-  - name: Upgrade job storage
+  - name: Migrate storage post policy reconciliation
     command: >
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     run_once: true
+    register: l_pb_upgrade_control_plane_post_upgrade_storage
+    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+    failed_when:
+    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool

   - set_fact:
       reconcile_complete: True
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7d3a371e3..5de03951c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -127,6 +127,9 @@
                                | union(groups['oo_etcd_to_config'] | default([])))
                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
                                }}"
+    openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+                                          | oo_collect('openshift.common.ip') | default([]) | join(',')
+                                          }}"
   roles:
   - role: openshift_master
     openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index ccd29be29..4df86effa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -49,11 +49,15 @@
   - '{{ instances }}'
   - [ user-data, meta-data ]

+- name: Check for genisoimage
+  command: which genisoimage
+  register: which_genisoimage
+
 - name: Create the cloud-init config drive
-  command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+  command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
   args:
-    chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
-    creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+    chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
+    creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
   with_items: '{{ instances }}'

 - name: Refresh the libvirt storage pool for openshift
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index f672760aa..0b3a2a69d 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,6 +1,6 @@
 ---

-__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/
+__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
 __ansible_service_broker_image_tag: latest

 __ansible_service_broker_etcd_image_prefix: rhel7/
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 8d07dbecc..bdece7640 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 openshift_storageclass_defaults:
   aws:
-    provisioner: kubernetes.io/aws-ebs
+    provisioner: aws-ebs
     name: gp2
     parameters:
       type: gp2
@@ -9,7 +9,7 @@ openshift_storageclass_defaults:
     encrypted: 'false'

   gce:
     name: standard
-    provisioner: kubernetes.io/gce-pd
+    provisioner: gce-pd
     parameters:
       type: pd-standard
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index a5ce8d5c7..a68165bde 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -9,7 +9,7 @@
   register: docker_excluder_stat

 - name: disable docker excluder
-  command: "{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
+  command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
   when:
   - unexclude_docker_excluder | default(false) | bool
   - docker_excluder_stat.stat.exists
@@ -20,7 +20,7 @@
   register: openshift_excluder_stat

 - name: disable openshift excluder
-  command: "{{ r_openshift_excluder_service_type }}-excluder unexclude"
+  command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder unexclude"
   when:
   - unexclude_openshift_excluder | default(false) | bool
   - openshift_excluder_stat.stat.exists
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 4712ca3a8..49cc51b48 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1647,6 +1647,13 @@ def set_proxy_facts(facts):
                 common['no_proxy'] = common['no_proxy'].split(",")
             elif 'no_proxy' not in common:
                 common['no_proxy'] = []
+
+            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
+            # masters behind a proxy need to connect to etcd via IP
+            if 'no_proxy_etcd_host_ips' in common:
+                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
+                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))
+
             if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                 if 'no_proxy_internal_hostnames' in common:
                     common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9b7125240..c643c6c46 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -140,6 +140,12 @@
 - set_fact:
     openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"

+- name: Set fact of all etcd host IPs
+  openshift_facts:
+    role: common
+    local_facts:
+      no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+
 - name: Install the systemd units
   include: systemd_units.yml
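
The storage-migration behaviour added to upgrade_control_plane.yml is controlled entirely by the four inventory variables documented in hosts.origin.example and hosts.ose.example. Below is a minimal sketch of setting them as group variables, using the defaults implied by the `default(...)` filters in the playbook (pre-upgrade migration failures fatal, post-upgrade migration failures non-fatal); the group_vars file path is illustrative, not something this commit adds.

```yaml
# group_vars/OSEv3.yml (illustrative location) -- toggles consumed by
# playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
---
# Run the pre-upgrade "migrate storage --include=*" pass and abort the upgrade if it fails.
openshift_upgrade_pre_storage_migration_enabled: true
openshift_upgrade_pre_storage_migration_fatal: true

# Run the post-upgrade migrations (clusterpolicies and the post-reconciliation pass),
# but treat a non-zero return code as non-fatal.
openshift_upgrade_post_storage_migration_enabled: true
openshift_upgrade_post_storage_migration_fatal: false
```

With `openshift_upgrade_post_storage_migration_fatal: false`, the registered result is still recorded, but the `failed_when` conditions in the playbook no longer fail the play on a non-zero return code.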
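
README_libvirt.md now expects a keypair at the fixed paths `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`. The following is a small sketch, not part of the repository, of a play that creates such a keypair if it is missing; the play name and the `ssh-keygen` parameters are assumptions.

```yaml
---
# Illustrative helper: ensure the fixed-name keypair expected by the libvirt
# playbooks exists before launching instances.
- name: Ensure ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub exist
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Generate an RSA keypair if one is not already present
      command: ssh-keygen -t rsa -b 4096 -N "" -f {{ lookup('env', 'HOME') }}/.ssh/id_rsa
      args:
        creates: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
```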