author    | OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com> | 2017-09-22 17:14:23 -0700
committer | GitHub <noreply@github.com>                                             | 2017-09-22 17:14:23 -0700
commit    | 4d0bbb8a0be72ca4b8008bbcce8383ea0f704377 (patch)
tree      | 926f366210fcc52f0c4e3773c1d1da984ae30094
parent    | d09223f529e37fd4983bbb29f27edb403bbe010d (diff)
parent    | 912002d316a4534f952f0f743b9e0406bc39e9a0 (diff)
Merge pull request #5496 from mgugino-upstream-stage/remove-hosted-logging
Automatic merge from submit-queue
Remove unused openshift_hosted_logging role
This role has not been used for several releases. It is not tested by any
checks, and no meaningful updates have been made to it in several months.
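For reference, this is roughly how the removed role was consumed from a playbook. The play below is a hypothetical minimal sketch, not taken from this repository: the hosts group is illustrative, and the variable values are the examples given in the role's own README (shown in the diff below).

# Hypothetical example only -- not part of this repository.
# Hosts group is illustrative; variable values come from the removed README.
- hosts: masters
  roles:
    - role: openshift_hosted_logging
      openshift_hosted_logging_hostname: kibana.example.com
      openshift_hosted_logging_elasticsearch_cluster_size: 1
      openshift_hosted_logging_master_public_url: https://localhost:8443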
-rw-r--r-- | roles/openshift_hosted_logging/README.md                        |  40
-rw-r--r-- | roles/openshift_hosted_logging/defaults/main.yml                |   2
-rw-r--r-- | roles/openshift_hosted_logging/handlers/main.yml                |  21
-rw-r--r-- | roles/openshift_hosted_logging/meta/main.yaml                   |   3
-rw-r--r-- | roles/openshift_hosted_logging/tasks/cleanup_logging.yaml       |  59
-rw-r--r-- | roles/openshift_hosted_logging/tasks/deploy_logging.yaml        | 177
-rw-r--r-- | roles/openshift_hosted_logging/tasks/main.yaml                  |   8
-rw-r--r-- | roles/openshift_hosted_logging/tasks/update_master_config.yaml  |   7
-rw-r--r-- | roles/openshift_hosted_logging/vars/main.yaml                   |  32
9 files changed, 0 insertions, 349 deletions
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
deleted file mode 100644
index 680303853..000000000
--- a/roles/openshift_hosted_logging/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-###Required vars:
-
-- openshift_hosted_logging_hostname: kibana.example.com
-- openshift_hosted_logging_elasticsearch_cluster_size: 1
-- openshift_hosted_logging_master_public_url: https://localhost:8443
-
-###Optional vars:
-- openshift_hosted_logging_image_prefix: logging image prefix. No default. Use this to specify an alternate image repository e.g. my.private.repo:5000/private_openshift/
-- target_registry: DEPRECATED - use openshift_hosted_logging_image_prefix instead
-- openshift_hosted_logging_image_version: logging image version suffix. Defaults to the current version of the deployed software.
-- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
-- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
-- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.
-- openshift_hosted_logging_elasticsearch_instance_ram: Amount of RAM to reserve per ElasticSearch instance (e.g. 1024M, 2G). Defaults to 8GiB; must be at least 512M (Ref.: [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html\#\_memory).
-- openshift_hosted_logging_elasticsearch_pvc_size: Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead.
-- openshift_hosted_logging_elasticsearch_pvc_prefix: Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size `openshift_hosted_logging_elasticsearch_pvc_size`.
-- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster).
-- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
-- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should be use (label=value)
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
-- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
-- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should be use (label=value)
-- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should be use (label=value)
-- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
-- openshift_hosted_logging_use_journal: *DEPRECATED - DO NOT USE*
-- openshift_hosted_logging_journal_source: By default, if this param is unset or empty, logging will use `/var/log/journal` if it exists, or `/run/log/journal` if not. You can use this param to force logging to use a different location.
-- openshift_hosted_logging_journal_read_from_head: Set to `true` to have fluentd read from the beginning of the journal, to get historical log data. Default is `false`. *WARNING* Using `true` may take several minutes or even hours, depending on the size of the journal, until any new records show up in Elasticsearch, and will cause fluentd to consume a lot of CPU and RAM resources.
-
-When `openshift_hosted_logging_enable_ops_cluster` is `True`, there are some
-additional vars. These work the same as above for their non-ops counterparts,
-but apply to the OPS cluster instance:
-- openshift_hosted_logging_ops_hostname: kibana-ops.example.com
-- openshift_hosted_logging_elasticsearch_ops_cluster_size
-- openshift_hosted_logging_elasticsearch_ops_instance_ram
-- openshift_hosted_logging_elasticsearch_ops_pvc_size
-- openshift_hosted_logging_elasticsearch_ops_pvc_prefix
-- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic
-- openshift_hosted_logging_elasticsearch_ops_nodeselector
-- openshift_hosted_logging_kibana_ops_nodeselector
-- openshift_hosted_logging_curator_ops_nodeselector
diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
deleted file mode 100644
index d7e83fe9a..000000000
--- a/roles/openshift_hosted_logging/handlers/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {% else %}
-    --cacert {{ openshift.common.config_base }}/master/ca.crt
-    {% endif %}
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    #  Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
deleted file mode 100644
index ab07a77c1..000000000
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
deleted file mode 100644
index 70b0d67a4..000000000
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Create temp directory for kubeconfig
-  command: mktemp -d /tmp/openshift-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Copy the admin client config(s)
-  command: >
-    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-  changed_when: False
-
-- name: "Checking for logging project"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
-  register: logging_project
-  failed_when: "'FAILED' in logging_project.stderr"
-
-- name: "Changing projects"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
-
-
-- name: "Cleanup any previous logging infrastructure"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
-  with_items:
-    - kibana
-    - fluentd
-    - elasticsearch
-  ignore_errors: yes
-
-- name: "Cleanup existing support infrastructure"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
-  ignore_errors: yes
-
-- name: "Cleanup existing secrets"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
-  ignore_errors: yes
-  register: clean_result
-  failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
-
-- name: "Cleanup existing logging deployers"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
-
-
-- name: "Cleanup logging project"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
-
-
-- name: "Remove deployer template"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
-  register: delete_output
-  failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
-
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
-
-- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
deleted file mode 100644
index 78b624109..000000000
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
----
-- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
-  when: target_registry is defined and target_registry
-
-- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
-  when: "openshift_hosted_logging_hostname is not defined or
-        openshift_hosted_logging_elasticsearch_cluster_size is not defined or
-        openshift_hosted_logging_master_public_url is not defined"
-
-- name: Create temp directory for kubeconfig
-  command: mktemp -d /tmp/openshift-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Copy the admin client config(s)
-  command: >
-    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-  changed_when: False
-
-- name: "Check for logging project already exists"
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
-  register: logging_project_result
-  ignore_errors: True
-
-- name: "Create logging project"
-  command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
-  when: logging_project_result.stdout == ""
-
-- name: "Changing projects"
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
-- name: "Creating logging deployer secret"
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
-  register: secret_output
-  failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
-
-- name: "Create templates for logging accounts and the deployer"
-  command: >
-    {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
-    -f {{ hosted_base }}/logging-deployer.yaml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-    -n logging
-  register: logging_import_template
-  failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
-  changed_when: "'created' in logging_import_template.stdout"
-
-- name: "Process the logging accounts template"
-  shell: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
-  register: process_deployer_accounts
-  failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
-- name: "Set permissions for logging-deployer service account"
-  command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-    policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
-  register: permiss_output
-  failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
-
-- name: "Set permissions for fluentd"
-  command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-    policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
-  register: fluentd_output
-  failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
-
-- name: "Set additional permissions for fluentd"
-  command: >
-    {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
-    add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
-  register: fluentd2_output
-  failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
-
-- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
-  command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-    policy add-cluster-role-to-user rolebinding-reader \
-    system:serviceaccount:logging:aggregated-logging-elasticsearch
-  register: rolebinding_reader_output
-  failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
-
-- name: "Create ConfigMap for deployer parameters"
-  command: >
-    {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
-  register: deployer_configmap_output
-  failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
-
-- name: "Process the deployer template"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
-  register: process_deployer
-  failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
-- name: "Wait for image pull and deployer pod"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
-  register: result
-  until: result.rc == 0
-  retries: 20
-  delay: 15
-
-- name: "Process imagestream template"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
-  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-  register: process_is
-  failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
-- name: "Set insecured registry"
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
-  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
-- name: "Wait for imagestreams to become available"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
-  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-  register: result
-  until: result.rc == 0
-  failed_when: result.rc == 1 and 'not found' not in result.stderr
-  retries: 20
-  delay: 5
-
-- name: "Wait for component pods to be running"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
-  with_items:
-    - es
-    - kibana
-    - curator
-  register: result
-  until: result.rc == 0
-  failed_when: result.rc == 1 or 'Error' in result.stderr
-  retries: 20
-  delay: 15
-
-- name: "Wait for ops component pods to be running"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
-  with_items:
-    - es-ops
-    - kibana-ops
-    - curator-ops
-  when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
-  register: result
-  until: result.rc == 0
-  failed_when: result.rc == 1 or 'Error' in result.stderr
-  retries: 20
-  delay: 15
-
-- name: "Wait for fluentd DaemonSet to exist"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
-  register: result
-  until: result.rc == 0
-  failed_when: result.rc == 1 or 'Error' in result.stderr
-  retries: 20
-  delay: 5
-
-- name: "Deploy fluentd by labeling the node"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
-- name: "Wait for fluentd to be running"
-  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
-  register: result
-  until: result.rc == 0
-  failed_when: result.rc == 1 or 'Error' in result.stderr
-  retries: 20
-  delay: 15
-
-- include: update_master_config.yaml
-
-- debug:
-    msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml
deleted file mode 100644
index 42568597a..000000000
--- a/roles/openshift_hosted_logging/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cleanup logging deployment
-  include: "{{ role_path }}/tasks/cleanup_logging.yaml"
-  when: openshift_hosted_logging_cleanup | default(false) | bool
-
-- name: Deploy logging
-  include: "{{ role_path }}/tasks/deploy_logging.yaml"
-  when: not openshift_hosted_logging_cleanup | default(false) | bool
diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
deleted file mode 100644
index 1122e059c..000000000
--- a/roles/openshift_hosted_logging/tasks/update_master_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Adding Kibana route information to loggingPublicURL
-  modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: assetConfig.loggingPublicURL
-    yaml_value: "https://{{ logging_hostname }}"
-  notify: restart master
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
deleted file mode 100644
index 4b350b244..000000000
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
-ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
-iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
-oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}"
-kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}"
-pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}"
-es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}"
-es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}"
-es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}"
-es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}"
-es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}"
-es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}"
-es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}"
-es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}"
-es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}"
-es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}"
-es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}"
-es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}"
-es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}"
-fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}"
-kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}"
-kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}"
-cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
-cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
-ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
-journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
-journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
-ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
-deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
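For context, the net effect of the removed update_master_config.yaml task above was a single edit to the master configuration. The fragment below is a hypothetical sketch of the result, using the example hostname from the removed README; surrounding keys of master-config.yaml are omitted.

# Hypothetical fragment of master-config.yaml after the removed task ran.
# The hostname is the README's example value; real deployments used
# openshift_hosted_logging_hostname.
assetConfig:
  loggingPublicURL: "https://kibana.example.com"

The task set assetConfig.loggingPublicURL with the modify_yaml module and notified the restart master handler.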