From 6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus <jdetiber@redhat.com>
Date: Wed, 1 Apr 2015 15:09:19 -0400
Subject: Configuration updates for latest builds and major refactor

Configuration updates for latest builds
- Switch to using create-node-config
- Switch sdn services to use etcd over SSL
  - This re-uses the client certificate deployed on each node
- Additional node registration changes
- Do not assume that metadata service is available in openshift_facts module
- Call systemctl daemon-reload after installing openshift-master, openshift-sdn-master, openshift-node, openshift-sdn-node
- Fix bug overriding openshift_hostname and openshift_public_hostname in byo playbooks
- Start moving generated configs to /etc/openshift
- Some custom module cleanup
- Add known issue with ansible-1.9 to README_OSE.md
- Update to genericize the kubernetes_register_node module
  - Default to using kubectl for commands
  - Allow for overriding kubectl_cmd
  - In openshift_register_node role, override kubectl_cmd to openshift_kube
    (see the sketch after this list)
- Default openshift_registry_url to the enterprise registry when deployment_type is enterprise
- Fix openshift_register_node for client config change
- Ensure that master certs directory is created
- Add roles and filter_plugins symlinks to playbooks/common/openshift-master and openshift-node
- Allow non-root user with sudo nopasswd access
- Updates for README_OSE.md
- Update byo inventory to add additional comments
- Updates for node cert/config sync to work with non-root user using sudo
- Move node config/certs to /etc/openshift/node
- Don't use a path for mktemp; addresses https://github.com/openshift/openshift-ansible/issues/154
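
To illustrate the kubectl_cmd override described above (a minimal sketch --
the variable name comes from this commit, but the file paths and the exact
values are assumptions, not the shipped code):

    # roles/kubernetes_register_node/defaults/main.yml (hypothetical path)
    kubectl_cmd: kubectl

    # roles/openshift_register_node/vars/main.yml (hypothetical path)
    # Role vars take precedence over the generic default, so registration
    # runs through the openshift wrapper instead of plain kubectl.
    kubectl_cmd: openshift kube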

Create common playbooks
- create common/openshift-master/config.yml
- create common/openshift-node/config.yml
- update playbooks to use the new common playbooks (see the sketch below)
- update launch playbooks to call the update playbooks
- fix openshift_registry and openshift_node_ip usage
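
The wiring looks roughly like this (a sketch; the include path matches the
new layout, but the vars shown are placeholders, not the exact interface):

    # e.g. in playbooks/aws/openshift-master/config.yml
    - include: ../../common/openshift-master/config.yml
      vars:
        openshift_cluster_id: "{{ cluster_id }}"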

Set default deployment type to origin
- openshift_repo updates for enabling origin deployments (see the sketch after
  this list)
  - also separate repo and gpgkey file structure
  - remove kubernetes repo since it isn't currently needed
- full deployment type support for bin/cluster
  - honor OS_DEPLOYMENT_TYPE env variable
  - add --deployment-type option, which will override OS_DEPLOYMENT_TYPE if set
  - if neither OS_DEPLOYMENT_TYPE nor --deployment-type is set, default to
    origin installs
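
A task shape like the following conveys the repo separation (illustrative
only; the real repo role's task names and file layout may differ):

    - name: Install origin repo files
      copy:
        src: "{{ item }}"
        dest: /etc/yum.repos.d/
      with_fileglob:
        - origin/repos/*
      when: deployment_type == 'origin'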

Additional changes:
- Add separate config action to bin/cluster that runs ansible config but does
  not update packages
- Further reduce duplication in cluster playbooks.
- Rename task files in playbooks dirs to include 'tasks' in their names for clarity.
- update aws/gce scripts to use a directory for inventory (otherwise an error
  occurs when the dynamic inventory returns no hosts)

libvirt refactor and update

- add libvirt dynamic inventory
- updates to use dynamic inventory for libvirt (consumption of the new task
  files is sketched below)
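
The new task files below are consumed from the cluster launch playbook,
roughly as follows (a sketch of the wiring, not the exact launch.yml):

    - include: tasks/configure_libvirt.yml
    - include: tasks/launch_instances.yml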
---
 .../openshift-cluster/tasks/configure_libvirt.yml  |   6 ++
 .../tasks/configure_libvirt_network.yml            |  27 ++++++
 .../tasks/configure_libvirt_storage_pool.yml       |  29 ++++++
 .../openshift-cluster/tasks/launch_instances.yml   | 108 +++++++++++++++++++++
 4 files changed, 170 insertions(+)
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
 create mode 100644 playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+  when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+  when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..1cd83f7be
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+  register: net_info_result
+  changed_when: False
+  failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+  command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX"
+  register: mktemp
+  when: net_info_result.rc == 1
+
+- name: Create network xml file
+  template:
+    src: templates/network.xml
+    dest: "{{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+  command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+  file:
+    path: "{{ mktemp.stdout }}"
+    state: absent
+  when: net_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..817acb250
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,29 @@
+---
+- name: Create libvirt storage directory for openshift
+  file:
+    dest: "{{ libvirt_storage_pool_path }}"
+    state: directory
+
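+# Set a default ACL giving the kvm group rwx on the pool path, so files
+# created under it (the VM disk images) stay accessible to qemu.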
+- acl:
+    default: yes
+    entity: kvm
+    etype: group
+    name: "{{ libvirt_storage_pool_path }}"
+    permissions: rwx
+    state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+  register: pool_info_result
+  changed_when: False
+  failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+  when: pool_info_result.rc == 1
+
+- name: Refresh the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+  when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..96d440096
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,108 @@
+---
+# TODO: Add support for choosing the base image based on deployment_type and
+# the desired os (os selection needs support added in bin/cluster with sane
+# defaults: fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and to
+# create/query data about vms without having to use xml; the python libvirt
+# bindings look like a good candidate for this
+
+- name: Download Base Cloud image
+  get_url:
+    url: '{{ image_url }}'
+    sha256sum: '{{ image_sha256 }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+
+- name: Create the cloud-init config drive path
+  file:
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+    state: directory
+  with_items: instances
+
+- name: Create the cloud-init config drive files
+  template:
+    src: '{{ item[1] }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
+  with_nested:
+    - instances
+    - [ user-data, meta-data ]
+
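+# genisoimage packs user-data and meta-data into an ISO labelled 'cidata',
+# which cloud-init recognizes at boot as a NoCloud config drive.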
+- name: Create the cloud-init config drive
+  command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+  args:
+    chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+    creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+  with_items: instances
+
+- name: Create VMs drives
+  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+  with_items: instances
+
+- name: Create VMs
+  virt:
+    name: '{{ item }}'
+    command: define
+    xml: "{{ lookup('template', '../templates/domain.xml') }}"
+    uri: '{{ libvirt_uri }}'
+  with_items: instances
+
+- name: Start VMs
+  virt:
+    name: '{{ item }}'
+    state: running
+    uri: '{{ libvirt_uri }}'
+  with_items: instances
+
+- name: Collect MAC addresses of the VMs
+  shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+  register: scratch_mac
+  with_items: instances
+
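+# Once a guest starts talking on the network, the host's ARP cache maps its
+# MAC address to an IP, so polling /proc/net/arp avoids needing a guest agent.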
+- name: Wait for the VMs to get an IP
+  command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+  ignore_errors: yes
+  register: nb_allocated_ips
+  until: nb_allocated_ips.stdout == '{{ instances | length }}'
+  retries: 30
+  delay: 1
+
+- name: Collect IP addresses of the VMs
+  shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+  register: scratch_ip
+  with_items: scratch_mac.results
+
+- set_fact:
+    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+  add_host:
+    hostname: '{{ item.0 }}'
+    ansible_ssh_host: '{{ item.1 }}'
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+  with_together:
+    - instances
+    - ips
+
+- name: Wait for ssh
+  wait_for:
+    host: '{{ item }}'
+    port: 22
+  with_items: ips
+
+- name: Wait for openshift user setup
+  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
+  register: result
+  until: result.rc == 0
+  retries: 30
+  delay: 1
+  with_together:
+    - instances
+    - ips