author    Scott Dodson <sdodson@redhat.com>    2016-11-30 10:35:55 -0500
committer GitHub <noreply@github.com>    2016-11-30 10:35:55 -0500
commit    161d6ce3976dbae137a5b90252fc9966734f7c15 (patch)
tree      17acb009aaec787ef134ce63244e5bb64df98231
parent    e4e281fefdafd0ee5cadb11cae6c67a409c262ce (diff)
parent    5c24cf417b08e0b427435d1bd5d27a4b03467092 (diff)
Merge pull request #2859 from dgoodwin/ovs-docker-restart
Cleanup ovs file and restart docker on every upgrade.
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 27
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 1
-rw-r--r--  roles/openshift_node/handlers/main.yml | 6
8 files changed, 73 insertions, 41 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 439df5ffd..9f7961614 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,9 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+    skip_node_svc_handlers: True
+
 - name: Update systemd units
   include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
 
-- name: Verifying the correct version was configured
-  shell: grep {{ verify_upgrade_version }} {{ item }}
-  with_items:
-  - /etc/sysconfig/openvswitch
-  - /etc/sysconfig/{{ openshift.common.service_type }}*
-  when: verify_upgrade_version is defined
+# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
+# play when the node has already been marked schedulable again. (this would look strange
+# in logs otherwise)
+- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
new file mode 100644
index 000000000..d800b289b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -0,0 +1,29 @@
+---
+- name: Restart docker
+  service: name=docker state=restarted
+
+- name: Update docker facts
+  openshift_facts:
+    role: docker
+
+- name: Restart containerized services
+  service: name={{ item }} state=started
+  with_items:
+  - etcd_container
+  - openvswitch
+  - "{{ openshift.common.service_type }}-master"
+  - "{{ openshift.common.service_type }}-master-api"
+  - "{{ openshift.common.service_type }}-master-controllers"
+  - "{{ openshift.common.service_type }}-node"
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+  when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 5d753447c..44ddf97ad 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -37,30 +37,5 @@
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
 
-- service: name=docker state=started
-
-- name: Update docker facts
-  openshift_facts:
-    role: docker
-
-- name: Restart containerized services
-  service: name={{ item }} state=started
-  with_items:
-  - etcd_container
-  - openvswitch
-  - "{{ openshift.common.service_type }}-master"
-  - "{{ openshift.common.service_type }}-master-api"
-  - "{{ openshift.common.service_type }}-master-controllers"
-  - "{{ openshift.common.service_type }}-node"
-  failed_when: false
-  when: openshift.common.is_containerized | bool
-
-- name: Wait for master API to come back online
-  become: no
-  local_action:
-    module: wait_for
-      host="{{ inventory_hostname }}"
-      state=started
-      delay=10
-      port="{{ openshift.master.api_port }}"
-  when: inventory_hostname in groups.oo_masters_to_config
+- include: restart.yml
+  when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index d7d1fe548..df2b664d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -6,7 +6,3 @@
 - name: Ensure python-yaml present for config upgrade
   package: name=PyYAML state=present
   when: not openshift.common.is_atomic | bool
-
-- name: Restart node service
-  service: name="{{ openshift.common.service_type }}-node" state=restarted
-  when: component == "node"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 57c25aa41..4d714ef4e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -29,6 +29,7 @@
 
 - name: Backup etcd
   include: ./etcd/backup.yml
+  when: not openshift_upgrade_skip_etcd_backup | default(false) | bool
 
 - name: Upgrade master packages
   hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 53d670196..bb7955c45 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -44,8 +44,13 @@
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
+
   tasks:
+
   - include: docker/upgrade.yml
+    vars:
+      # We will restart Docker ourselves after everything is ready:
+      skip_docker_restart: True
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
 
   - include: "{{ node_config_hook }}"
@@ -57,11 +62,31 @@
       openshift_version: "{{ openshift_pkg_version | default('') }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
 
+  - name: Remove obsolete docker-sdn-ovs.conf
+    file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
+    when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
+
   - include: containerized_node_upgrade.yml
     when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
 
-  - meta: flush_handlers
+  - name: Ensure containerized services stopped before Docker restart
+    service: name={{ item }} state=stopped
+    with_items:
+    - etcd_container
+    - openvswitch
+    - "{{ openshift.common.service_type }}-master"
+    - "{{ openshift.common.service_type }}-master-api"
+    - "{{ openshift.common.service_type }}-master-controllers"
+    - "{{ openshift.common.service_type }}-node"
+    failed_when: false
+    when: openshift.common.is_containerized | bool
+
+  # Mandatory Docker restart, ensure all containerized services are running:
+  - include: docker/restart.yml
 
+  - name: Restart rpm node service
+    service: name="{{ openshift.common.service_type }}-node" state=restarted
+    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
   - name: Set node schedulability
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 95a9d668a..eb29848ff 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1309,6 +1309,7 @@ def get_openshift_version(facts):
     # No need to run this method repeatedly on a system if we already know the
     # version
+    # TODO: We need a way to force reload this after upgrading bits.
     if 'common' in facts:
         if 'version' in facts['common'] and facts['common']['version'] is not None:
             return chomp_commit_offset(facts['common']['version'])
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index ebe584588..cb51416d4 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,14 +1,14 @@
 ---
 - name: restart openvswitch
   systemd: name=openvswitch state=restarted
-  when: not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
   notify:
   - restart openvswitch pause
 
 - name: restart openvswitch pause
   pause: seconds=15
-  when: openshift.common.is_containerized | bool
+  when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
 
 - name: restart node
   systemd: name={{ openshift.common.service_type }}-node state=restarted
-  when: not (node_service_status_changed | default(false) | bool)
+  when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)