25 files changed, 302 insertions, 293 deletions
@@ -1,5 +1,5 @@ [flake8] # TODO: cleanup flake8 issues with utils/test/* -exclude=.tox,inventory,utils/test +exclude=.tox,inventory max_line_length = 120 ignore = E501,T003 diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml index 7fd60743c..a2361d50c 100644 --- a/playbooks/container-runtime/private/build_container_groups.yml +++ b/playbooks/container-runtime/private/build_container_groups.yml @@ -3,4 +3,4 @@ hosts: oo_all_hosts:!oo_nodes_to_config tasks: - group_by: - key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }} + key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }} diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index fb621f898..2e8231ccc 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -186,13 +186,9 @@ resources: $ ansible-playbook --user openshift \ -i openshift-ansible/playbooks/openstack/inventory.py -i inventory \ - openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml \ - -e openshift_repos_enable_testing=true + openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml ``` -Note, you may want to use the testing repo for development purposes only. -Normally, `openshift_repos_enable_testing` should not be specified. - In addition to *your* inventory with your OpenShift and OpenStack configuration, we are also supplying the [dynamic inventory][dynamic] from `openshift-ansible/inventory`. It's a script that will look at the Nova servers diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 1bc1b5e43..f869b5fae 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -375,6 +375,11 @@ secret: secretName: etcd-auth-secret +- name: set auth name and type facts if needed + set_fact: + ansible_service_broker_registry_auth_type: "secret" + ansible_service_broker_registry_auth_name: "asb-registry-auth" + when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != "" # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: - name: Create config map for ansible-service-broker @@ -402,6 +407,8 @@ org: {{ ansible_service_broker_registry_organization }} tag: {{ ansible_service_broker_registry_tag }} white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }} + auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}" + auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}" - type: local_openshift name: localregistry namespaces: ['openshift'] @@ -447,6 +454,7 @@ data: "{{ ansible_service_broker_registry_user }}" - path: password data: "{{ ansible_service_broker_registry_password }}" + when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != "" - name: Create the Broker resource in the catalog oc_obj: diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index d6e7e7fed..ed9a2709b 100644 --- a/roles/container_runtime/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -1,6 +1,17 @@ --- - include_tasks: common/pre.yml +# In some cases, some services may be run as containers and docker may still +# be installed via rpm. 
+- include_tasks: common/atomic_proxy.yml + when: + - > + (openshift_use_system_containers | default(False)) | bool + or (openshift_use_etcd_system_container | default(False)) | bool + or (openshift_use_openvswitch_system_container | default(False)) | bool + or (openshift_use_node_system_container | default(False)) | bool + or (openshift_use_master_system_container | default(False)) | bool + - name: Get current installed Docker version command: "{{ repoquery_installed }} --qf '%{version}' docker" when: not openshift_is_atomic | bool diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index 83e551b5d..b9c41d1b4 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -5,6 +5,7 @@ Health checks for OpenShift clusters. import json import operator import os +import re import time import collections @@ -309,28 +310,38 @@ class OpenShiftCheck(object): name_list = name_list.split(',') return [name.strip() for name in name_list if name.strip()] - @staticmethod - def get_major_minor_version(openshift_image_tag): + def get_major_minor_version(self, openshift_image_tag=None): """Parse and return the deployed version of OpenShift as a tuple.""" - if openshift_image_tag and openshift_image_tag[0] == 'v': - openshift_image_tag = openshift_image_tag[1:] - # map major release versions across releases - # to a common major version - openshift_major_release_version = { - "1": "3", - } + version = openshift_image_tag or self.get_var("openshift_image_tag") + components = [int(component) for component in re.findall(r'\d+', version)] - components = openshift_image_tag.split(".") - if not components or len(components) < 2: + if len(components) < 2: msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(openshift_image_tag)) + raise OpenShiftCheckException(msg.format(version)) + + # map major release version across releases to OCP major version + components[0] = {1: 3}.get(components[0], components[0]) + + return tuple(int(x) for x in components[:2]) + + def get_required_version(self, name, version_map): + """Return the correct required version(s) for the current (or nearest) OpenShift version.""" + openshift_version = self.get_major_minor_version() + + earliest = min(version_map) + latest = max(version_map) + if openshift_version < earliest: + return version_map[earliest] + if openshift_version > latest: + return version_map[latest] - if components[0] in openshift_major_release_version: - components[0] = openshift_major_release_version[components[0]] + required_version = version_map.get(openshift_version) + if not required_version: + msg = "There is no recommended version of {} for the current version of OpenShift ({})" + raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version))) - components = tuple(int(x) for x in components[:2]) - return components + return required_version def find_ansible_mount(self, path): """Return the mount point for path from ansible_mounts.""" diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 0cad19842..58a2692bd 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -3,7 +3,7 @@ Ansible module for determining if an installed version of 
Open vSwitch is incomp currently installed version of OpenShift. """ -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): tags = ["health"] openshift_to_ovs_version = { - "3.7": ["2.6", "2.7", "2.8"], - "3.6": ["2.6", "2.7", "2.8"], - "3.5": ["2.6", "2.7"], - "3.4": "2.4", + (3, 4): "2.4", + (3, 5): ["2.6", "2.7"], + (3, 6): ["2.6", "2.7", "2.8"], + (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } def is_active(self): @@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): return self.execute_module("rpm_version", args) def get_required_ovs_version(self): - """Return the correct Open vSwitch version for the current OpenShift version""" - openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag")) - - if openshift_version_tuple < (3, 5): - return self.openshift_to_ovs_version["3.4"] - - openshift_version = ".".join(str(x) for x in openshift_version_tuple) - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if ovs_version: - return self.openshift_to_ovs_version[openshift_version] - - msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(openshift_version)) + """Return the correct Open vSwitch version(s) for the current OpenShift version.""" + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index f3a628e28..28aee8b35 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -1,8 +1,6 @@ """Check that available RPM packages match the required versions.""" -import re - -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin @@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 5): ["2.6", "2.7"], (3, 6): ["2.6", "2.7", "2.8"], (3, 7): ["2.6", "2.7", "2.8"], + (3, 8): ["2.6", "2.7", "2.8"], + (3, 9): ["2.6", "2.7", "2.8"], } openshift_to_docker_version = { @@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): (3, 4): "1.12", (3, 5): "1.12", (3, 6): "1.12", - } - - # map major OpenShift release versions across releases to a common major version - map_major_release_version = { - 1: 3, + (3, 7): "1.12", + (3, 8): "1.12", + (3, 9): ["1.12", "1.13"], } def is_active(self): @@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): def get_required_ovs_version(self): """Return the correct Open vSwitch version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_ovs_version) - latest = max(self.openshift_to_ovs_version) - if openshift_version < earliest: - return self.openshift_to_ovs_version[earliest] - if openshift_version > latest: - return self.openshift_to_ovs_version[latest] - - ovs_version = self.openshift_to_ovs_version.get(openshift_version) - if not ovs_version: - msg = "There is no recommended version of Open vSwitch for the current version of 
OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return ovs_version + return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) def get_required_docker_version(self): """Return the correct Docker version(s) for the current OpenShift version.""" - openshift_version = self.get_openshift_version_tuple() - - earliest = min(self.openshift_to_docker_version) - latest = max(self.openshift_to_docker_version) - if openshift_version < earliest: - return self.openshift_to_docker_version[earliest] - if openshift_version > latest: - return self.openshift_to_docker_version[latest] - - docker_version = self.openshift_to_docker_version.get(openshift_version) - if not docker_version: - msg = "There is no recommended version of Docker for the current version of OpenShift: {}" - raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version))) - - return docker_version - - def get_openshift_version_tuple(self): - """Return received image tag as a normalized (X, Y) minor version tuple.""" - version = self.get_var("openshift_image_tag") - comps = [int(component) for component in re.findall(r'\d+', version)] - - if len(comps) < 2: - msg = "An invalid version of OpenShift was found for this host: {}" - raise OpenShiftCheckException(msg.format(version)) - - comps[0] = self.map_major_release_version.get(comps[0], comps[0]) - return tuple(comps[0:2]) + return self.get_required_version("Docker", self.openshift_to_docker_version) diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 0238f49d5..80c7a0541 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -1,26 +1,7 @@ import pytest -from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException - - -def test_openshift_version_not_supported(): - def execute_module(*_): - return {} - - openshift_release = '111.7.0' - - task_vars = dict( - openshift=dict(common=dict()), - openshift_release=openshift_release, - openshift_image_tag='v' + openshift_release, - openshift_deployment_type='origin', - openshift_service_type='origin' - ) - - with pytest.raises(OpenShiftCheckException) as excinfo: - OvsVersion(execute_module, task_vars).run() - - assert "no recommended version of Open vSwitch" in str(excinfo.value) +from openshift_checks.ovs_version import OvsVersion +from openshift_checks import OpenShiftCheckException def test_invalid_openshift_release_format(): diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index d2916f617..868b4bd12 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -1,6 +1,7 @@ import pytest -from openshift_checks.package_version import PackageVersion, OpenShiftCheckException +from openshift_checks.package_version import PackageVersion +from openshift_checks import OpenShiftCheckException def task_vars_for(openshift_release, deployment_type): @@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type): def test_openshift_version_not_supported(): check = PackageVersion(None, task_vars_for("1.2.3", 'origin')) - check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict + check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict with 
pytest.raises(OpenShiftCheckException) as excinfo: check.get_required_ovs_version() diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index 4a2ee64f0..6fdba6580 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -12,6 +12,7 @@ separator: '#' content: metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" + metadata#annotations#openshift.io/logging.data.prefix: ".operations" with_items: "{{ __logging_ops_projects.stdout.split(' ') }}" loop_control: loop_var: project diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index fbc3e3fd1..00643c80e 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -131,13 +131,13 @@ when: not openshift_logging_install_eventrouter | default(false) | bool -# Update asset config in openshift-web-console namespace -- name: Remove Kibana route information from web console asset config +# Update console config in openshift-web-console namespace +- name: Remove Kibana route information from the web console config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: + console_config_edits: - key: loggingPublicURL value: "" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 610c7b4e5..9fa0ad990 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -19,13 +19,13 @@ clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' -# Update asset config in openshift-web-console namespace -- name: Remove metrics route information from web console asset config +# Update the web config in openshift-web-console namespace +- name: Remove metrics route information from the web console config include_role: name: openshift_web_console - tasks_from: update_asset_config.yml + tasks_from: update_console_config.yml vars: - asset_config_edits: + console_config_edits: - key: metricsPublicURL value: "" when: openshift_web_console_install | default(true) | bool diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index c1fab4382..0b10413c5 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -48,6 +48,12 @@ openshift_node_kubelet_args_dict: cloud-config: - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}" node-labels: "{{ l_node_kubelet_node_labels }}" + azure: + cloud-provider: + - azure + cloud-config: + - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}" + node-labels: "{{ l_node_kubelet_node_labels }}" undefined: node-labels: "{{ l_node_kubelet_node_labels }}" diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml new file mode 100644 index 000000000..75682ba1d --- /dev/null +++ b/roles/openshift_web_console/tasks/rollout_console.yml @@ -0,0 +1,20 @@ +--- +- name: Check if console deployment exists + oc_obj: + kind: deployments + name: webconsole + namespace: openshift-web-console + state: list + register: console_deployment + +# There's currently 
no command to trigger a rollout for a k8s deployment +# without changing the pod spec. Add an annotation to force a rollout. +- name: Rollout updated web console deployment + oc_edit: + kind: deployments + name: webconsole + namespace: openshift-web-console + separator: '#' + content: + spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" + when: console_deployment.results.results.0 | length > 0 diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml index e347c0193..4d2957977 100644 --- a/roles/openshift_web_console/tasks/update_console_config.yml +++ b/roles/openshift_web_console/tasks/update_console_config.yml @@ -58,14 +58,4 @@ changed_when: False # TODO: Only rollout if config has changed. -# There's currently no command to trigger a rollout for a k8s deployment -# without changing the pod spec. Add an annotation to force a rollout after -# the config map has been edited. -- name: Rollout updated web console deployment - oc_edit: - kind: deployments - name: webconsole - namespace: openshift-web-console - separator: '#' - content: - spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}" +- include_tasks: rollout_console.yml diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml index 7adb8a0d0..42d331ac5 100644 --- a/roles/openshift_web_console/vars/default_images.yml +++ b/roles/openshift_web_console/vars/default_images.yml @@ -1,4 +1,4 @@ --- -__openshift_web_console_prefix: "docker.io/openshift/" +__openshift_web_console_prefix: "docker.io/openshift/origin-" __openshift_web_console_version: "latest" -__openshift_web_console_image_name: "origin-web-console" +__openshift_web_console_image_name: "web-console" diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml index 721ac1d27..375c22067 100644 --- a/roles/openshift_web_console/vars/openshift-enterprise.yml +++ b/roles/openshift_web_console/vars/openshift-enterprise.yml @@ -1,4 +1,4 @@ --- -__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/" +__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-" __openshift_web_console_version: "v3.9" -__openshift_web_console_image_name: "ose-web-console" +__openshift_web_console_image_name: "web-console" diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 604e94602..82b211032 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -85,3 +85,9 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False + +- name: Rollout console so it discovers the template service broker is installed + include_role: + name: openshift_web_console + tasks_from: rollout_console.yml + when: openshift_web_console_install | default(true) | bool diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index db1b558e4..767e8ddc1 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -31,3 +31,9 @@ state: absent name: "{{ mktemp.stdout }}" changed_when: False + +- name: Rollout console so it discovers the template service broker is removed + include_role: + name: openshift_web_console + tasks_from: rollout_console.yml + when: 
openshift_web_console_install | default(true) | bool diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml index 4a28d47b2..5129f4471 100644 --- a/roles/tuned/tasks/main.yml +++ b/roles/tuned/tasks/main.yml @@ -28,7 +28,12 @@ when: item.state == 'file' - name: Make tuned use the recommended tuned profile on restart - file: path=/etc/tuned/active_profile state=absent + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/tuned/active_profile + - /etc/tuned/profile_mode - name: Restart tuned service systemd: diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 673997c42..2259f3416 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -384,6 +384,7 @@ deployment: storage: """ + class UnattendedCliTests(OOCliFixture): def setUp(self): @@ -402,8 +403,9 @@ class UnattendedCliTests(OOCliFixture): load_facts_mock.return_value = (mock_facts, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + SAMPLE_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -481,8 +483,9 @@ class UnattendedCliTests(OOCliFixture): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + SAMPLE_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -490,16 +493,18 @@ class UnattendedCliTests(OOCliFixture): load_facts_args = load_facts_mock.call_args[0] self.assertEquals(os.path.join(self.work_dir, "hosts"), - load_facts_args[0]) - self.assertEquals(os.path.join(self.work_dir, - "playbooks/byo/openshift_facts.yml"), load_facts_args[1]) + load_facts_args[0]) + self.assertEquals( + os.path.join(self.work_dir, "playbooks/byo/openshift_facts.yml"), + load_facts_args[1]) env_vars = load_facts_args[2] - self.assertEquals(os.path.join(self.work_dir, - '.ansible/callback_facts.yaml'), + self.assertEquals( + os.path.join(self.work_dir, '.ansible/callback_facts.yaml'), env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) # If user running test has rpm installed, this might be set to default: - self.assertTrue('ANSIBLE_CONFIG' not in env_vars or + self.assertTrue( + 'ANSIBLE_CONFIG' not in env_vars or env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG) # Make sure we ran on the expected masters and nodes: @@ -515,8 +520,9 @@ class UnattendedCliTests(OOCliFixture): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), merged_config) + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + merged_config) self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -526,9 +532,9 @@ class UnattendedCliTests(OOCliFixture): inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('root', - inventory.get('OSEv3:vars', 
'ansible_ssh_user')) + inventory.get('OSEv3:vars', 'ansible_ssh_user')) self.assertEquals('openshift-enterprise', - inventory.get('OSEv3:vars', 'deployment_type')) + inventory.get('OSEv3:vars', 'deployment_type')) # Check the masters: self.assertEquals(1, len(inventory.items('masters'))) @@ -546,13 +552,13 @@ class UnattendedCliTests(OOCliFixture): @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') - def test_variant_version_latest_assumed(self, load_facts_mock, - run_playbook_mock): + def test_variant_version_latest_assumed(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + SAMPLE_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -569,19 +575,18 @@ class UnattendedCliTests(OOCliFixture): inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('openshift-enterprise', - inventory.get('OSEv3:vars', 'deployment_type')) + inventory.get('OSEv3:vars', 'deployment_type')) @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') - def test_variant_version_preserved(self, load_facts_mock, - run_playbook_mock): + def test_variant_version_preserved(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 config = SAMPLE_CONFIG % 'openshift-enterprise' config = '%s\n%s' % (config, 'variant_version: 3.3') - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), config) + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), config) self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -597,7 +602,7 @@ class UnattendedCliTests(OOCliFixture): inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) self.assertEquals('openshift-enterprise', - inventory.get('OSEv3:vars', 'deployment_type')) + inventory.get('OSEv3:vars', 'deployment_type')) # unattended with bad config file and no installed hosts (without --force) @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -606,25 +611,28 @@ class UnattendedCliTests(OOCliFixture): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + BAD_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) self.assertEquals(1, result.exit_code) - self.assertTrue("You must specify either an ip or hostname" + self.assertTrue( + "You must specify either an ip or hostname" in result.output) - #unattended with three masters, one node, and haproxy + # unattended with three masters, one node, and haproxy @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock): 
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + QUICKHA_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -636,15 +644,16 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals(6, len(hosts)) self.assertEquals(6, len(hosts_to_run_on)) - #unattended with two masters, one node, and haproxy + # unattended with two masters, one node, and haproxy @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -653,15 +662,16 @@ class UnattendedCliTests(OOCliFixture): self.assert_result(result, 1) self.assertTrue("A minimum of 3 masters are required" in result.output) - #unattended with three masters, one node, but no load balancer specified: + # unattended with three masters, one node, but no load balancer specified: @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + QUICKHA_CONFIG_NO_LB % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -670,15 +680,16 @@ class UnattendedCliTests(OOCliFixture): self.assert_result(result, 1) self.assertTrue('No master load balancer specified in config' in result.output) - #unattended with three masters, one node, and one of the masters reused as load balancer: + # unattended with three masters, one node, and one of the masters reused as load balancer: @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -686,15 +697,16 @@ class UnattendedCliTests(OOCliFixture): # This is not a valid configuration: self.assert_result(result, 1) - #unattended with preconfigured lb + # unattended with preconfigured lb 
@patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise') + config_file = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), + QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise') self.cli_args.extend(["-c", config_file, "install"]) result = self.runner.invoke(cli.cli, self.cli_args) @@ -706,6 +718,7 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals(6, len(hosts)) self.assertEquals(6, len(hosts_to_run_on)) + class AttendedCliTests(OOCliFixture): def setUp(self): @@ -720,17 +733,18 @@ class AttendedCliTests(OOCliFixture): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', False, False), - ('10.0.0.3', False, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - storage='10.1.0.1',) + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', False, False), + ('10.0.0.3', False, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + storage='10.1.0.1',) self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) self._verify_load_facts(load_facts_mock) @@ -741,12 +755,12 @@ class AttendedCliTests(OOCliFixture): inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) - self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', - 'openshift_schedulable=False') - self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2', - 'openshift_schedulable=True') - self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3', - 'openshift_schedulable=True') + self.assert_inventory_host_var( + inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') + self.assert_inventory_host_var_unset( + inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True') + self.assert_inventory_host_var_unset( + inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True') # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -762,15 +776,16 @@ class AttendedCliTests(OOCliFixture): load_facts_mock.return_value = (mock_facts, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', False, False), + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', False, False), ], - add_nodes=[('10.0.0.3', False, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - storage='10.0.0.1',) + add_nodes=[('10.0.0.3', False, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + storage='10.0.0.1',) self.cli_args.append("install") result = self.runner.invoke(cli.cli, self.cli_args, @@ -781,7 +796,6 @@ class AttendedCliTests(OOCliFixture): self.assertTrue('scaleup' in result.output) self.assert_result(result, 1) - @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_fresh_install_with_config(self, load_facts_mock, 
run_playbook_mock): @@ -830,26 +844,27 @@ class AttendedCliTests(OOCliFixture): # exp_hosts_to_run_on_len=2, # force=False) - #interactive multimaster: one more node than master + # interactive multimaster: one more node than master @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', True, False), - ('10.0.0.3', True, False), - ('10.0.0.4', False, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - master_lb=('10.0.0.5', False), - storage='10.1.0.1',) + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', True, False), + ('10.0.0.4', False, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + master_lb=('10.0.0.5', False), + storage='10.1.0.1',) self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) self._verify_load_facts(load_facts_mock) @@ -872,25 +887,26 @@ class AttendedCliTests(OOCliFixture): self.assertTrue(inventory.has_section('etcd')) self.assertEquals(3, len(inventory.items('etcd'))) - #interactive multimaster: identical masters and nodes + # interactive multimaster: identical masters and nodes @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', True, False), - ('10.0.0.3', True, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - master_lb=('10.0.0.5', False), - storage='10.1.0.1',) + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', True, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + master_lb=('10.0.0.5', False), + storage='10.1.0.1',) self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) self._verify_load_facts(load_facts_mock) @@ -919,7 +935,9 @@ class AttendedCliTests(OOCliFixture): full_line = "%s=%s" % (a, b) tokens = full_line.split() if tokens[0] == host: - self.assertTrue(variable in tokens[1:], "Unable to find %s in line: %s" % (variable, full_line)) + self.assertTrue( + variable in tokens[1:], + "Unable to find %s in line: %s" % (variable, full_line)) return self.fail("unable to find host %s in inventory" % host) @@ -938,45 +956,46 @@ class AttendedCliTests(OOCliFixture): return self.fail("unable to find host %s in inventory" % host) - - #interactive multimaster: attempting to use a master as the load balancer should fail: + # interactive multimaster: attempting to use a master as the load balancer should fail: @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) run_playbook_mock.return_value = 0 - cli_input = 
build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', True, False), - ('10.0.0.3', False, False), - ('10.0.0.4', True, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - master_lb=(['10.0.0.2', '10.0.0.5'], False), - storage='10.1.0.1') + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', True, False), + ('10.0.0.3', False, False), + ('10.0.0.4', True, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + master_lb=(['10.0.0.2', '10.0.0.5'], False), + storage='10.1.0.1') self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) - #interactive all-in-one + # interactive all-in-one @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_all_in_one(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - storage='10.0.0.1') + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + storage='10.0.0.1') self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) self._verify_load_facts(load_facts_mock) @@ -990,25 +1009,25 @@ class AttendedCliTests(OOCliFixture): self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=True') - @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_gen_inventory(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ('10.0.0.2', False, False), - ('10.0.0.3', False, False)], - ssh_user='root', - variant_num=1, - confirm_facts='y', - storage='10.1.0.1',) + cli_input = build_input( + hosts=[ + ('10.0.0.1', True, False), + ('10.0.0.2', False, False), + ('10.0.0.3', False, False)], + ssh_user='root', + variant_num=1, + confirm_facts='y', + storage='10.1.0.1',) self.cli_args.append("install") self.cli_args.append("--gen-inventory") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) + result = self.runner.invoke( + cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) self._verify_load_facts(load_facts_mock) @@ -1021,12 +1040,12 @@ class AttendedCliTests(OOCliFixture): inventory = configparser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) - self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', - 'openshift_schedulable=False') - self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2', - 'openshift_schedulable=True') - self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3', - 'openshift_schedulable=True') + self.assert_inventory_host_var( + inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') + self.assert_inventory_host_var_unset( + inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True') + self.assert_inventory_host_var_unset( + inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True') # TODO: test with config file, attended add node 
diff --git a/utils/test/fixture.py b/utils/test/fixture.py index 873ac4a27..5c0d1d2c1 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -43,6 +43,7 @@ deployment: node: """ + def read_yaml(config_file_path): cfg_f = open(config_file_path, 'r') config = yaml.safe_load(cfg_f.read()) @@ -105,7 +106,7 @@ class OOCliFixture(OOInstallFixture): self.assertTrue('ip' in host) self.assertTrue('public_ip' in host) - #pylint: disable=too-many-arguments + # pylint: disable=too-many-arguments def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock, run_playbook_mock, cli_input, exp_hosts_len=None, exp_hosts_to_run_on_len=None, @@ -152,7 +153,7 @@ class OOCliFixture(OOInstallFixture): self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) -#pylint: disable=too-many-arguments,too-many-branches,too-many-statements +# pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, master_lb=('', False), storage=None): @@ -190,7 +191,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, else: inputs.append('rpm') - #inputs.append('rpm') + # inputs.append('rpm') # We should not be prompted to add more hosts if we're currently at # 2 masters, this is an invalid HA configuration, so this question # will not be asked, and the user must enter the next host: @@ -224,13 +225,13 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, inputs.append('y') inputs.append('1') # Add more nodes i = 0 - for (host, is_master, is_containerized) in add_nodes: + for (host, _, is_containerized) in add_nodes: inputs.append(host) if is_containerized: inputs.append('container') else: inputs.append('rpm') - #inputs.append('rpm') + # inputs.append('rpm') if i < len(add_nodes) - 1: inputs.append('y') # Add more hosts else: diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index 5651e6e7a..80cdbe618 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -107,6 +107,7 @@ deployment: node: """ + class OOInstallFixture(unittest.TestCase): def setUp(self): @@ -133,13 +134,12 @@ class OOInstallFixture(unittest.TestCase): return path - class OOConfigTests(OOInstallFixture): def test_load_config(self): - cfg_path = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG) + cfg_path = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG) ooconfig = OOConfig(cfg_path) self.assertEquals(3, len(ooconfig.deployment.hosts)) @@ -155,26 +155,25 @@ class OOConfigTests(OOInstallFixture): def test_load_bad_config(self): - cfg_path = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), CONFIG_BAD) + cfg_path = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_BAD) try: OOConfig(cfg_path) assert False except OOConfigInvalidHostError: assert True - def test_load_complete_facts(self): - cfg_path = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG) + cfg_path = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG) ooconfig = OOConfig(cfg_path) missing_host_facts = ooconfig.calc_missing_facts() self.assertEquals(0, len(missing_host_facts)) # Test missing optional facts the user must confirm: def test_load_host_incomplete_facts(self): - cfg_path = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS) + cfg_path = self.write_config( + 
os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS) ooconfig = OOConfig(cfg_path) missing_host_facts = ooconfig.calc_missing_facts() self.assertEquals(2, len(missing_host_facts)) @@ -182,8 +181,8 @@ class OOConfigTests(OOInstallFixture): self.assertEquals(3, len(missing_host_facts['10.0.0.3'])) def test_write_config(self): - cfg_path = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG) + cfg_path = self.write_config( + os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG) ooconfig = OOConfig(cfg_path) ooconfig.save_to_disk() @@ -191,8 +190,6 @@ class OOConfigTests(OOInstallFixture): written_config = yaml.safe_load(f.read()) f.close() - - self.assertEquals(3, len(written_config['deployment']['hosts'])) for h in written_config['deployment']['hosts']: self.assertTrue('ip' in h) @@ -259,8 +256,10 @@ class HostTests(OOInstallFixture): # Given the `yaml_props` above we should see a line like this: # openshift_node_labels="{'region': 'infra'}" - node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash - node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash + # Quotes around the hash + node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' + # No quotes around the hash + node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # The good line is present in the written inventory line self.assertIn(node_labels_expected, legacy_inventory_line) diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py index cabeaee34..a72e429d1 100644 --- a/utils/test/test_utils.py +++ b/utils/test/test_utils.py @@ -29,7 +29,6 @@ class TestUtils(unittest.TestCase): mock.call('OO_FOO: bar'), ] - ###################################################################### # Validate ooinstall.utils.debug_env functionality |
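
The most substantive change in this diff is the health-checker refactor: the duplicated version lookups in ovs_version.py and package_version.py (get_required_ovs_version, get_required_docker_version, get_openshift_version_tuple) are folded into two shared helpers on OpenShiftCheck, get_major_minor_version() and get_required_version(). The standalone sketch below approximates that lookup logic; the helper names follow the diff, but the sample map and the demo at the bottom are illustrative assumptions, and the real helpers are methods on the check class that read openshift_image_tag from Ansible task variables.

```python
import re


class VersionLookupError(Exception):
    """Stand-in for OpenShiftCheckException in this sketch."""


def get_major_minor_version(image_tag):
    """Normalize an image tag such as 'v3.7.0-0.126.0' to a (major, minor) tuple."""
    components = [int(part) for part in re.findall(r'\d+', image_tag)]
    if len(components) < 2:
        raise VersionLookupError("An invalid version of OpenShift was found: {}".format(image_tag))
    # Map Origin 1.x releases onto the OCP 3.x numbering, as the check does.
    components[0] = {1: 3}.get(components[0], components[0])
    return tuple(components[:2])


def get_required_version(name, version_map, image_tag):
    """Clamp to the earliest/latest known release; reject unmapped versions in between."""
    openshift_version = get_major_minor_version(image_tag)
    earliest, latest = min(version_map), max(version_map)
    if openshift_version < earliest:
        return version_map[earliest]
    if openshift_version > latest:
        return version_map[latest]
    required = version_map.get(openshift_version)
    if not required:
        raise VersionLookupError(
            "There is no recommended version of {} for OpenShift {}".format(
                name, ".".join(str(c) for c in openshift_version)))
    return required


if __name__ == "__main__":
    # Hypothetical sample map; the real checks enumerate every supported release.
    ovs_map = {(3, 4): "2.4", (3, 6): ["2.6", "2.7", "2.8"], (3, 7): ["2.6", "2.7", "2.8"]}
    print(get_required_version("Open vSwitch", ovs_map, "v3.9.0-alpha.3"))  # newer than latest -> clamps to (3, 7)
    print(get_required_version("Open vSwitch", ovs_map, "v1.4.1"))          # Origin 1.4 -> OCP (3, 4) -> "2.4"
```

Note that a version inside the known range but missing from the map still raises, which is why the maps in package_version.py and ovs_version.py are extended through (3, 9) in this diff.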