Diffstat (limited to 'roles')
31 files changed, 812 insertions, 37 deletions
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index 7831f4c7d..8dd916e79 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -21,6 +21,7 @@
   retries: 4
   until: curr_docker_version is succeeded
   changed_when: false
+  when: not openshift_is_atomic | bool
 
 - name: Get latest available version of Docker
   command: >
@@ -29,7 +30,9 @@
   retries: 4
   until: avail_docker_version is succeeded
   # Don't expect docker rpm to be available on hosts that don't already have it installed:
-  when: pkg_check.rc == 0
+  when:
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0
   failed_when: false
   changed_when: false
@@ -37,9 +40,10 @@
     msg: This playbook requires access to Docker 1.12 or later
   # Disable the 1.12 requirement if the user set a specific Docker version
   when:
-  - docker_version is not defined
-  - docker_upgrade is not defined or docker_upgrade | bool == True
-  - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
+  - not openshift_is_atomic | bool
+  - docker_version is not defined
+  - docker_upgrade is not defined or docker_upgrade | bool == True
+  - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
 
 # Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
 - set_fact:
@@ -48,14 +52,17 @@
 # Make sure a docker_version is set if none was requested:
 - set_fact:
     docker_version: "{{ avail_docker_version.stdout }}"
-  when: pkg_check.rc == 0 and docker_version is not defined
+  when:
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0 and docker_version is not defined
 
 - name: Flag for Docker upgrade if necessary
   set_fact:
     l_docker_upgrade: True
   when:
-  - pkg_check.rc == 0
-  - curr_docker_version.stdout is version_compare(docker_version,'<')
+  - not openshift_is_atomic | bool
+  - pkg_check.rc == 0
+  - curr_docker_version.stdout is version_compare(docker_version,'<')
 
 # Additional checks for Atomic hosts:
 - name: Determine available Docker
@@ -70,5 +77,5 @@
 - fail:
     msg: This playbook requires access to Docker 1.12 or later
   when:
-    - openshift_is_atomic | bool
-    - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
+  - openshift_is_atomic | bool
+  - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
index cf49a6ebf..df53280c8 100644
--- a/roles/openshift_buildoverrides/vars/main.yml
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -9,3 +9,4 @@ buildoverrides_yaml:
     imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
     nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
     annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
+    tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}"
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index dff492a69..3513577fa 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -19,3 +19,6 @@
 
 - include_tasks: gce.yml
   when: cloudprovider_is_gce | bool
+
+- include_tasks: vsphere.yml
+  when: cloudprovider_is_vsphere | bool
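A minimal sketch of the gate the docker_upgrade_check changes above produce, assuming Ansible's version_compare test orders versions roughly the way LooseVersion does (the helper below is invented for illustration, not code from the repo):

    # Invented helper mirroring the when: conditions above.
    from distutils.version import LooseVersion

    def docker_upgrade_required(is_atomic, pkg_check_rc, curr, target):
        if is_atomic:          # rpm-based checks are now skipped entirely on Atomic hosts
            return False
        if pkg_check_rc != 0:  # docker rpm not installed; nothing to compare
            return False
        return LooseVersion(curr) < LooseVersion(target)

    assert docker_upgrade_required(False, 0, "1.10.3", "1.12.6")
    assert not docker_upgrade_required(True, 0, "1.10.3", "1.12.6")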
diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml
new file mode 100644
index 000000000..3a33df241
--- /dev/null
+++ b/roles/openshift_cloud_provider/tasks/vsphere.yml
@@ -0,0 +1,6 @@
+---
+- name: Create cloud config
+  template:
+    dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf"
+    src: vsphere.conf.j2
+  when: openshift_cloudprovider_vsphere_username is defined and openshift_cloudprovider_vsphere_password is defined and openshift_cloudprovider_vsphere_host is defined and openshift_cloudprovider_vsphere_datacenter is defined and openshift_cloudprovider_vsphere_datastore is defined
diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
new file mode 100644
index 000000000..84e5e371c
--- /dev/null
+++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
@@ -0,0 +1,15 @@
+[Global]
+user = "{{ openshift_cloudprovider_vsphere_username }}"
+password = "{{ openshift_cloudprovider_vsphere_password }}"
+server = "{{ openshift_cloudprovider_vsphere_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ openshift_cloudprovider_vsphere_datacenter }}
+datastore = {{ openshift_cloudprovider_vsphere_datastore }}
+{% if openshift_cloudprovider_vsphere_folder is defined %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/
+{% else %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/
+{% endif %}
+[Disk]
+scsicontrollertype = pvscsi
diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml
index c9d953f58..e71db80b9 100644
--- a/roles/openshift_cloud_provider/vars/main.yml
+++ b/roles/openshift_cloud_provider/vars/main.yml
@@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}"
 cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}"
 cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}"
 cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}"
+cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}"
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index dcaf87eca..c83adb26d 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -175,6 +175,8 @@ def format_failure(failure):
     play = failure['play']
     task = failure['task']
     msg = failure['msg']
+    if not isinstance(msg, string_types):
+        msg = str(msg)
     checks = failure['checks']
     fields = (
         (u'Hosts', host),
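The zz_failure_summary.py change above guards against non-string msg values before formatting. A standalone sketch of the behavior (illustrative only, not repo code):

    from six import string_types  # (str, unicode) on Python 2; (str,) on Python 3

    def normalize_msg(msg):
        # A health check may return a dict or list as its failure message;
        # the summary formatter expects text, so coerce anything else.
        if not isinstance(msg, string_types):
            msg = str(msg)
        return msg

    print(normalize_msg({'failed': True, 'msg': 'check error'}))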
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index b6501d288..f40085976 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: []
 ############
 
 openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
-penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
 openshift_hosted_registry_routecertificates: {}
 openshift_hosted_registry_routetermination: "passthrough"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 27cfc17d6..a192bd67e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster:
   clients will use to connect to mux, and will be used in the TLS server
   cert subject.
 - `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_external_address`: The IP address that mux will listen
+  on for connections from *external* clients.  Default is the default ipv4
+  interface as reported by the `ansible_default_ipv4` fact.
 - `openshift_logging_mux_cpu_request`: 100m
 - `openshift_logging_mux_memory_limit`: 512Mi
 - `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 302a9b4c9..37ffb0204 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -276,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
             return
         for item in role["subjects"]:
             comp = self.comp(item["name"])
-            if comp is not None and namespace == item["namespace"]:
+            if comp is not None and namespace == item.get("namespace"):
                 self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
 
     # this needs to end up nested under the service account...
@@ -288,7 +288,7 @@
             return
         for item in role["subjects"]:
             comp = self.comp(item["name"])
-            if comp is not None and namespace == item["namespace"]:
+            if comp is not None and namespace == item.get("namespace"):
                 self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
 
     # pylint: disable=no-self-use, too-many-return-statements
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 67904a9d3..ebd2d747b 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -94,7 +94,7 @@
     _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
   with_together:
-  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}"
   - "{{ openshift_logging_facts.elasticsearch.pvcs }}"
   - "{{ es_indices }}"
   loop_control:
@@ -169,7 +169,7 @@
     _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
   with_together:
-  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}"
   - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
   - "{{ es_ops_indices }}"
   loop_control:
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 5bee58725..df5299a83 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
 ---
-__latest_curator_version: "3_8"
-__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_curator_version: "3_9"
+__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
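The `.values() | list` edits above (and the fluentd `keys() | first` edit below) are Python 3 compatibility fixes: dict views are no longer lists. A standalone illustration, not repo code:

    dcs = {"logging-es": {"replicas": 1}}

    # Python 2: dcs.values()[0] worked because values() returned a list.
    # Python 3: values() is a view object -- not indexable until converted.
    assert list(dcs.values())[0]["replicas"] == 1   # what "| list" produces
    assert next(iter(dcs.keys())) == "logging-es"   # what "| first" produces

    # Same spirit as item.get("namespace") above: .get() yields None instead
    # of raising KeyError when a rolebinding subject carries no namespace field.
    subject = {"name": "system:serviceaccount:logging:aggregated-logging-fluentd"}
    assert subject.get("namespace") is None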
"3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index ef259cd3a..122231031 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -1,6 +1,6 @@ --- -__latest_es_version: "3_8" -__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_es_version: "3_9" +__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 529859983..79ebbca08 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -172,8 +172,8 @@ app_port: "{{ openshift_logging_fluentd_app_port }}" ops_host: "{{ openshift_logging_fluentd_ops_host }}" ops_port: "{{ openshift_logging_fluentd_ops_port }}" - fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" - fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" + fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}" + fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}" fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml index 762e3d4d0..b60da814f 100644 --- a/roles/openshift_logging_fluentd/vars/main.yml +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -1,5 +1,5 @@ --- -__latest_fluentd_version: "3_8" -__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_fluentd_version: "3_9" +__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] __allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] __allowed_mux_client_modes: ["minimal", "maximal"] diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml index a2c54d8e4..fed926a3b 100644 --- a/roles/openshift_logging_kibana/vars/main.yml +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -1,3 +1,3 @@ --- -__latest_kibana_version: "3_8" -__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"] +__latest_kibana_version: "3_9" +__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"] diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index db6f23126..dbf4549c4 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -30,6 +30,7 @@ openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" openshift_logging_mux_hostname: "{{ 'mux.' 
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index db6f23126..dbf4549c4 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -30,6 +30,7 @@ openshift_logging_mux_allow_external: False
 openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
 openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
 openshift_logging_mux_port: 24284
+openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}"
 # the namespace to use for undefined projects should come first, followed by any
 # additional namespaces to create by default - users will typically not need to set this
 openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 34bdb891c..7eba3cda4 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -148,7 +148,7 @@
       port: "{{ openshift_logging_mux_port }}"
       targetPort: "mux-forward"
     external_ips:
-    - "{{ ansible_eth0.ipv4.address }}"
+    - "{{ openshift_logging_mux_external_address }}"
   when: openshift_logging_mux_allow_external | bool
 
 - name: Set logging-mux service for internal communication
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 1da053b4a..e87205bad 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
 ---
-__latest_mux_version: "3_8"
-__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_mux_version: "3_9"
+__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f72710832..7870f43e2 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -12,11 +12,11 @@
   package: name={{ master_pkgs | join(',') }} state=present
   vars:
     master_pkgs:
-    - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
-    - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
-    - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
-    - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
-    - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
-    - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+    - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+    - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
   register: result
   until: result is succeeded
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index 91a358095..d4b47bb9e 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -12,7 +12,7 @@
   until: result is succeeded
   vars:
     openshift_node_upgrade_rpm_list:
-    - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+    - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
     - "PyYAML"
     - "dnsmasq"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index c9094e05a..ef5d8d662 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -14,6 +14,6 @@
   until: result is succeeded
   vars:
    openshift_node_upgrade_rpm_list:
-    - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+    - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
    - "PyYAML"
    - "openvswitch"
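What `| default('')` buys in the rpm upgrade lists above, demonstrated with the jinja2 library directly (assumption: Ansible's handling of undefined variables matches StrictUndefined here):

    from jinja2 import Environment, StrictUndefined

    env = Environment(undefined=StrictUndefined)
    tmpl = env.from_string(
        "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}")

    # With openshift_pkg_version unset, default('') renders a bare package
    # name instead of raising "'openshift_pkg_version' is undefined":
    print(tmpl.render(openshift_service_type="origin"))            # origin-node
    print(tmpl.render(openshift_service_type="origin",
                      openshift_pkg_version="-3.9.0"))             # origin-node-3.9.0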
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi-${CLUSTER_NAME}
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: ${HEKETI_FSTAB}
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the fstab path, file that is populated with bricks that heketi creates
+  value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
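The ${...} parameters in these new v3.9 templates are expanded by `oc process`; Python's string.Template shares the ${VAR} syntax, so a hedged analogy (not how OpenShift itself is implemented):

    from string import Template

    name = Template("deploy-heketi-${CLUSTER_NAME}").substitute(CLUSTER_NAME="glusterfs")
    assert name == "deploy-heketi-glusterfs"

    # Parameters marked "required: True" with no default (IMAGE_NAME,
    # IMAGE_VERSION) must be supplied at processing time; substitute()
    # likewise raises KeyError when a referenced name is missing.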
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3-pvcs
+  labels:
+    glusterfs: s3-pvcs-template
+    gluster-s3: pvcs-template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${META_PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  required: true
+- name: PVC_SIZE
+  displayName: Primary GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage
+  value: 2Gi
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  required: true
+- name: META_PVC_SIZE
+  displayName: Metadata GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage metadata
+  value: 1Gi
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3
+  labels:
+    glusterfs: s3-template
+    gluster-s3: template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+  spec:
+    ports:
+    - protocol: TCP
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: s3-pod
+    type: ClusterIP
+    sessionAffinity: None
+  status:
+    loadBalancer: {}
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+  spec:
+    to:
+      kind: Service
+      name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    annotations:
+      openshift.io/scc: privileged
+      description: Defines how to deploy gluster s3 object storage
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+    template:
+      metadata:
+        name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+        labels:
+          glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+          gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+      spec:
+        containers:
+        - name: gluster-s3
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          ports:
+          - name: gluster
+            containerPort: 8080
+            protocol: TCP
+          env:
+          - name: S3_ACCOUNT
+            value: "${S3_ACCOUNT}"
+          - name: S3_USER
+            value: "${S3_USER}"
+          - name: S3_PASSWORD
+            value: "${S3_PASSWORD}"
+          resources: {}
+          volumeMounts:
+          - name: gluster-vol1
+            mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+          - name: gluster-vol2
+            mountPath: "/mnt/gluster-object/gsmetadata"
+          - name: glusterfs-cgroup
+            readOnly: true
+            mountPath: "/sys/fs/cgroup"
+          terminationMessagePath: "/dev/termination-log"
+          securityContext:
+            privileged: true
+        volumes:
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: gluster-vol1
+          persistentVolumeClaim:
+            claimName: ${PVC}
+        - name: gluster-vol2
+          persistentVolumeClaim:
+            claimName: ${META_PVC}
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        serviceAccountName: default
+        serviceAccount: default
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: S3_USER
+  displayName: S3 User
+  description: S3 user who can access the S3 storage account
+  required: true
+- name: S3_PASSWORD
+  displayName: S3 User Password
+  description: Password for the S3 user
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  value: gluster-s3-claim
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  value: gluster-s3-meta-claim
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterblock-provisioner
+  labels:
+    glusterfs: block-template
+    glusterblock: template
+  annotations:
+    description: glusterblock provisioner template
+    tags: glusterfs
+objects:
+- kind: ClusterRole
+  apiVersion: v1
+  metadata:
+    name: glusterblock-provisioner-runner
+    labels:
+      glusterfs: block-provisioner-runner-clusterrole
+      glusterblock: provisioner-runner-clusterrole
+  rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["routes"]
+    verbs: ["get", "list"]
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+      glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+  roleRef:
+    name: glusterblock-provisioner-runner
+  subjects:
+  - kind: ServiceAccount
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+      glusterblock: ${CLUSTER_NAME}-provisioner-dc
+    annotations:
+      description: Defines how to deploy the glusterblock provisioner pod.
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: glusterblock-provisioner
+        labels:
+          glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+      spec:
+        serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+        containers:
+        - name: glusterblock-provisioner
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: PROVISIONER_NAME
+            value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: NAMESPACE
+  displayName: glusterblock provisioner namespace
+  description: The namespace in which these resources are being created
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs-${CLUSTER_NAME}
+    labels:
+      glusterfs: ${CLUSTER_NAME}-daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs: ${CLUSTER_NAME}-pod
+    template:
+      metadata:
+        name: glusterfs-${CLUSTER_NAME}
+        labels:
+          glusterfs: ${CLUSTER_NAME}-pod
+          glusterfs-node: pod
+      spec:
+        nodeSelector: "${{NODE_LABELS}}"
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: GB_GLFS_LRU_COUNT
+            value: "${GB_GLFS_LRU_COUNT}"
+          - name: TCMU_LOGDIR
+            value: "${TCMU_LOGDIR}"
+          resources:
+            requests:
+              memory: 100Mi
+              cpu: 100m
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: NODE_LABELS
+  displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+  value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+  displayName: GlusterFS container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: GB_GLFS_LRU_COUNT
+  displayName: Maximum number of block hosting volumes
+  description: This value is to set maximum number of block hosting volumes.
+  value: "15"
+  required: true
+- name: TCMU_LOGDIR
+  displayName: Tcmu runner log directory
+  description: This value is to set tcmu runner log directory
+  value: "/var/log/glusterfs/gluster-block"
+  required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: heketi
+  labels:
+    glusterfs: heketi-template
+  annotations:
+    description: Heketi service deployment template
+    tags: glusterfs,heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-service
+      heketi: ${CLUSTER_NAME}-service
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-route
+      heketi: ${CLUSTER_NAME}-route
+  spec:
+    to:
+      kind: Service
+      name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-dc
+      heketi: ${CLUSTER_NAME}-dc
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: heketi-${CLUSTER_NAME}
+        labels:
+          glusterfs: heketi-${CLUSTER_NAME}-pod
+          heketi: ${CLUSTER_NAME}-pod
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: ${HEKETI_FSTAB}
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+          glusterfs:
+            endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+            path: heketidbstorage
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the fstab path, file that is populated with bricks that heketi creates
+  value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 8ee95e36b..12916961b 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -18,6 +18,8 @@
     oc_project:
       name: openshift-web-console
      state: present
+      node_selector:
+      - ""
 
 - name: Make temp directory for asset config files
   command: mktemp -d /tmp/console-ansible-XXXXXX
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..662d65d9f 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
 ---
 __template_service_broker_prefix: "docker.io/openshift/"
 __template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "origin-template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..16a08e72f 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
 ---
 __template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
 __template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "ose-template-service-broker"