Diffstat (limited to 'roles')
88 files changed, 2079 insertions, 2211 deletions
diff --git a/roles/calico/templates/calicoctl.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 3c8c6b046..3c8c6b046 100644
--- a/roles/calico/templates/calicoctl.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
diff --git a/roles/calico/templates/10-calico.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 722385ed8..722385ed8 100644
--- a/roles/calico/templates/10-calico.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 330de07eb..15e9c606d 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1450,7 +1450,7 @@ class OCProcess(OpenShiftCLI):
         if self._template is None:
             results = self._process(self.name, False, self.params, self.data)
             if results['returncode'] != 0:
-                raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+                raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
             self._template = results['results']['items']
         return self._template
 
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index eba9a43cd..62a6bd571 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -30,7 +30,7 @@ class OCProcess(OpenShiftCLI):
         if self._template is None:
             results = self._process(self.name, False, self.params, self.data)
             if results['returncode'] != 0:
-                raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+                raise OpenShiftCLIError('Error processing template [%s]: %s' %(self.name, results))
             self._template = results['results']['items']
         return self._template
 
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
deleted file mode 100644
index 334c9402b..000000000
--- a/roles/openshift_logging/files/logging-deployer-sa.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: logging-deployer
-secrets:
-- name: logging-deployer
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 44b0b2d48..eac086e81 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -42,7 +42,7 @@ def map_from_pairs(source, delim="="):
     if source == '':
         return dict()
 
-    return dict(source.split(delim) for item in source.split(","))
+    return dict(item.split(delim) for item in source.split(","))
 
 
 # pylint: disable=too-few-public-methods
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index a55e72725..35accfb78 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -318,7 +318,7 @@ def main():
     ''' The main method '''
     module = AnsibleModule(   # noqa: F405
         argument_spec=dict(
-            admin_kubeconfig={"required": True, "type": "str"},
+            admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
             oc_bin={"required": True, "type": "str"},
             openshift_logging_namespace={"required": True, "type": "str"}
         ),
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 2f5b68b4d..0c7152b16 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -1,43 +1,42 @@
 ---
-- name: stop logging
-  include: stop_cluster.yaml
-
 # delete the deployment objects that we had created
 - name: delete logging api objects
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  oc_obj:
+    state: absent
+    kind: "{{ item }}"
+    namespace: "{{ openshift_logging_namespace }}"
+    selector: "logging-infra"
   with_items:
     - dc
     - rc
     - svc
     - routes
     - templates
-    - daemonset
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
+    - ds
 
 # delete the oauthclient
 - name: delete oauthclient kibana-proxy
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+  oc_obj:
+    state: absent
+    kind: oauthclient
+    namespace: "{{ openshift_logging_namespace }}"
+    name: kibana-proxy
 
 # delete any image streams that we may have created
 - name: delete logging is
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+  oc_obj:
+    state: absent
+    kind: is
+    namespace: "{{ openshift_logging_namespace }}"
+    selector: "logging-infra=support"
 
 # delete our old secrets
 - name: delete logging secrets
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  oc_obj:
+    state: absent
+    kind: secret
+    namespace: "{{ openshift_logging_namespace }}"
+    name: "{{ item }}"
   with_items:
     - logging-fluentd
     - logging-elasticsearch
@@ -45,71 +44,55 @@
     - logging-kibana-proxy
     - logging-curator
     - logging-mux
-  ignore_errors: yes
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete role bindings
-- name: delete rolebindings
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
-  with_items:
-    - logging-elasticsearch-view-role
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster role bindings
-- name: delete cluster role bindings
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
-  with_items:
-    - rolebinding-reader
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster roles
-- name: delete cluster roles
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
-  with_items:
-    - rolebinding-reader
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
 
 # delete our service accounts
 - name: delete service accounts
   oc_serviceaccount:
-    name: "{{ item }}"
-    namespace: "{{ openshift_logging_namespace }}"
     state: absent
+    namespace: "{{ openshift_logging_namespace }}"
+    name: "{{ item }}"
   with_items:
     - aggregated-logging-elasticsearch
     - aggregated-logging-kibana
     - aggregated-logging-curator
    - aggregated-logging-fluentd
-# delete our roles
-- name: delete roles
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+# delete role bindings
+- name: delete rolebindings
+  oc_obj:
+    state: absent
+    kind: rolebinding
+    namespace: "{{ openshift_logging_namespace }}"
+    name: logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+  oc_obj:
+    state: absent
+    kind: clusterrolebindings
+    namespace: "{{ openshift_logging_namespace }}"
+    name: rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+  oc_obj:
+    state: absent
+    kind: clusterrole
+    namespace: "{{ openshift_logging_namespace }}"
+    name: "{{ item }}"
   with_items:
+    - rolebinding-reader
     - daemonset-admin
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
 
 # delete our configmaps
 - name: delete configmaps
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-    delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  oc_obj:
+    state: absent
+    kind: configmap
+    namespace: "{{ openshift_logging_namespace }}"
+    name: "{{ item }}"
   with_items:
     - logging-curator
     - logging-elasticsearch
     - logging-fluentd
     - logging-mux
-  register: delete_result
-  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 46a7e82c6..7169c4036 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -51,14 +51,14 @@
   with_items:
     - procure_component: mux
      hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
-  when: openshift_logging_use_mux
+  when: openshift_logging_use_mux | bool
 
 - include: procure_shared_key.yaml
   loop_control:
     loop_var: shared_key_info
   with_items:
     - procure_component: mux
-  when: openshift_logging_use_mux
+  when: openshift_logging_use_mux | bool
 
 - include: procure_server_certs.yaml
   loop_control:
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
deleted file mode 100644
index 56f590717..000000000
--- a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Generate ClusterRoleBindings
-  template: src=clusterrolebinding.j2
dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml -  vars: -    acct_name: aggregated-logging-elasticsearch -    obj_name: rolebinding-reader -    crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"] -    subjects: -      - kind: ServiceAccount -        name: "{{acct_name}}" -        namespace: "{{openshift_logging_namespace}}" -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml deleted file mode 100644 index 0b8b1014c..000000000 --- a/roles/openshift_logging/tasks/generate_clusterroles.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Generate ClusterRole for cluster-reader -  template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml -  vars: -    obj_name: rolebinding-reader -    rules: -      - resources: [clusterrolebindings] -        verbs: -          - get -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml deleted file mode 100644 index b047eb35a..000000000 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ /dev/null @@ -1,178 +0,0 @@ ---- -- block: -    - fail: -        msg: "The openshift_logging_es_log_appenders '{{openshift_logging_es_log_appenders}}' has an unrecognized option and only supports the following as a list: {{es_log_appenders | join(', ')}}" -      when: -        - es_logging_contents is undefined -        - "{{ openshift_logging_es_log_appenders | list | difference(es_log_appenders) | length != 0 }}" -      changed_when: no - -    - template: -        src: elasticsearch-logging.yml.j2 -        dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" -      vars: -        root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}" -      when: es_logging_contents is undefined -      changed_when: no -      check_mode: no - -    - local_action: > -        template src=elasticsearch.yml.j2 -        dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml" -      vars: -        - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}" -        - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" -        - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" -      when: es_config_contents is undefined -      changed_when: no - -    - copy: -        content: "{{ config_source | combine(override_config,recursive=True) | to_nice_yaml }}" -        dest: "{{mktemp.stdout}}/elasticsearch.yml" -      vars: -        config_source: "{{lookup('file','{{local_tmp.stdout}}/elasticsearch-gen-template.yml') | from_yaml }}" -        override_config: "{{openshift_logging_es_config | from_yaml}}" -      when: es_logging_contents is undefined -      changed_when: no - -    - copy: -        content: "{{es_logging_contents}}" -        dest: "{{mktemp.stdout}}/elasticsearch-logging.yml" -      when: es_logging_contents is defined -      changed_when: no - -    - copy: -        content: "{{es_config_contents}}" -        dest: "{{mktemp.stdout}}/elasticsearch.yml" -      when: es_config_contents is defined -      changed_when: no - -    - command: > -        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch -        
--from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run -      register: es_configmap -      changed_when: no - -    - copy: -        content: "{{es_configmap.stdout}}" -        dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml" -      when: es_configmap.stdout is defined -      changed_when: no -  check_mode: no - -- block: -    - copy: -        src: curator.yml -        dest: "{{mktemp.stdout}}/curator.yml" -      when: curator_config_contents is undefined -      changed_when: no - -    - copy: -        content: "{{curator_config_contents}}" -        dest: "{{mktemp.stdout}}/curator.yml" -      when: curator_config_contents is defined -      changed_when: no - -    - command: > -        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator -        --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run -      register: curator_configmap -      changed_when: no - -    - copy: -        content: "{{curator_configmap.stdout}}" -        dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml" -      when: curator_configmap.stdout is defined -      changed_when: no -  check_mode: no - -- block: -    - copy: -        src: fluent.conf -        dest: "{{mktemp.stdout}}/fluent.conf" -      when: fluentd_config_contents is undefined -      changed_when: no - -    - copy: -        src: fluentd-throttle-config.yaml -        dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml" -      when: fluentd_throttle_contents is undefined -      changed_when: no - -    - copy: -        src: secure-forward.conf -        dest: "{{mktemp.stdout}}/secure-forward.conf" -      when: fluentd_securefoward_contents is undefined -      changed_when: no - -    - copy: -        content: "{{fluentd_config_contents}}" -        dest: "{{mktemp.stdout}}/fluent.conf" -      when: fluentd_config_contents is defined -      changed_when: no - -    - copy: -        content: "{{fluentd_throttle_contents}}" -        dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml" -      when: fluentd_throttle_contents is defined -      changed_when: no - -    - copy: -        content: "{{fluentd_secureforward_contents}}" -        dest: "{{mktemp.stdout}}/secure-forward.conf" -      when: fluentd_secureforward_contents is defined -      changed_when: no - -    - command: > -        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd -        --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml -        --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run -      register: fluentd_configmap -      changed_when: no - -    - copy: -        content: "{{fluentd_configmap.stdout}}" -        dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml" -      when: fluentd_configmap.stdout is defined -      changed_when: no -  check_mode: no - -- block: -    - copy: -        src: fluent.conf -        dest: "{{mktemp.stdout}}/fluent-mux.conf" -      when: fluentd_mux_config_contents is undefined -      changed_when: no - -    - copy: -        src: secure-forward.conf -        dest: "{{mktemp.stdout}}/secure-forward-mux.conf" -      when: fluentd_mux_securefoward_contents is undefined -      changed_when: no - -    - copy: -        content: 
"{{fluentd_mux_config_contents}}" -        dest: "{{mktemp.stdout}}/fluent-mux.conf" -      when: fluentd_mux_config_contents is defined -      changed_when: no - -    - copy: -        content: "{{fluentd_mux_secureforward_contents}}" -        dest: "{{mktemp.stdout}}/secure-forward-mux.conf" -      when: fluentd_mux_secureforward_contents is defined -      changed_when: no - -    - command: > -        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux -        --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf -        --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run -      register: mux_configmap -      changed_when: no - -    - copy: -        content: "{{mux_configmap.stdout}}" -        dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml" -      when: mux_configmap.stdout is defined -      changed_when: no -  check_mode: no -  when: openshift_logging_use_mux diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml deleted file mode 100644 index 8aea4e81f..000000000 --- a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Generate kibana deploymentconfig -  template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml -  vars: -    component: kibana -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" -    proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" -    es_host: logging-es -    es_port: "{{openshift_logging_es_port}}" -  check_mode: no -  changed_when: no - -- name: Generate OPS kibana deploymentconfig -  template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml -  vars: -    component: kibana-ops -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" -    proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" -    es_host: logging-es-ops -    es_port: "{{openshift_logging_es_ops_port}}" -  check_mode: no -  changed_when: no - -- name: Generate elasticsearch deploymentconfig -  template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml -  vars: -    component: es -    deploy_name_prefix: "logging-{{component}}" -    deploy_name: "{{deploy_name_prefix}}-abc123" -    image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" -    es_cluster_name: "{{component}}" -  check_mode: no -  changed_when: no - -- name: Generate OPS elasticsearch deploymentconfig -  template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml -  vars: -    component: es-ops -    deploy_name_prefix: "logging-{{component}}" -    deploy_name: "{{deploy_name_prefix}}-abc123" -    image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" -    es_cluster_name: "{{component}}" -  check_mode: no -  changed_when: no - -- name: Generate curator deploymentconfig -  template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml -  vars: -    component: curator -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" -  check_mode: no -  changed_when: no - -- name: Generate OPS curator 
deploymentconfig -  template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml -  vars: -    component: curator-ops -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" -    openshift_logging_es_host: logging-es-ops -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml deleted file mode 100644 index fa7a86c27..000000000 --- a/roles/openshift_logging/tasks/generate_pvcs.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Init pool of PersistentVolumeClaim names -  set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}} -  vars: -    pvc_name: "{{es_pvc_prefix}}-{{item| int}}" -    start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}" -  with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}} -  when: -    - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}" -    - es_pvc_size | search('^\d.*') -  check_mode: no - -- name: Generating PersistentVolumeClaims -  template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml -  vars: -    obj_name: "{{claim_name}}" -    size: "{{es_pvc_size}}" -    access_modes: "{{ es_access_modes | list }}" -    pv_selector: "{{es_pv_selector}}" -  with_items: -    - "{{es_pvc_pool | default([])}}" -  loop_control: -    loop_var: claim_name -  when: -    - not es_pvc_dynamic -    - es_pvc_pool is defined -  check_mode: no -  changed_when: no - -- name: Generating PersistentVolumeClaims - Dynamic -  template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml -  vars: -    obj_name: "{{claim_name}}" -    annotations: -      volume.alpha.kubernetes.io/storage-class: "dynamic" -    size: "{{es_pvc_size}}" -    access_modes: "{{ es_access_modes | list }}" -    pv_selector: "{{es_pv_selector}}" -  with_items: -    - "{{es_pvc_pool|default([])}}" -  loop_control: -    loop_var: claim_name -  when: -    - es_pvc_dynamic -    - es_pvc_pool is defined -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml deleted file mode 100644 index 7dc9530df..000000000 --- a/roles/openshift_logging/tasks/generate_rolebindings.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Generate RoleBindings -  template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml -  vars: -    obj_name: logging-elasticsearch-view-role -    roleRef: -      name: view -    subjects: -      - kind: ServiceAccount -        name: aggregated-logging-elasticsearch -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml deleted file mode 100644 index ae9a8e023..000000000 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ /dev/null @@ -1,169 +0,0 @@ ---- -- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }} -  when: openshift_logging_kibana_key | trim | length > 0 -  changed_when: false - -- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode  }} -  when: openshift_logging_kibana_cert | trim | length > 0 -  changed_when: false - -- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode  }} -  when: 
openshift_logging_kibana_ca | trim | length > 0 -  changed_when: false - -- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }} -  when: kibana_ca is not defined -  changed_when: false - -- name: Generating logging routes -  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml -  tags: routes -  vars: -    obj_name: "logging-kibana" -    route_host: "{{openshift_logging_kibana_hostname}}" -    service_name: "logging-kibana" -    tls_key: "{{kibana_key | default('') | b64decode}}" -    tls_cert: "{{kibana_cert | default('') | b64decode}}" -    tls_ca_cert: "{{kibana_ca | b64decode}}" -    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" -    edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}" -    labels: -      component: support -      logging-infra: support -      provider: openshift -  changed_when: no - -- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }} -  when: -  - openshift_logging_use_ops | bool -  - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}" -  changed_when: false - -- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode  }} -  when: -  - openshift_logging_use_ops | bool -  - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}" -  changed_when: false - -- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode  }} -  when: -  - openshift_logging_use_ops | bool -  - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}" -  changed_when: false - -- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }} -  when: -  - openshift_logging_use_ops | bool -  - kibana_ops_ca is not defined -  changed_when: false - -- name: Generating logging ops routes -  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml -  tags: routes -  vars: -    obj_name: "logging-kibana-ops" -    route_host: "{{openshift_logging_kibana_ops_hostname}}" -    service_name: "logging-kibana-ops" -    tls_key: "{{kibana_ops_key | default('') | b64decode}}" -    tls_cert: "{{kibana_ops_cert | default('') | b64decode}}" -    tls_ca_cert: "{{kibana_ops_ca | b64decode}}" -    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" -    edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}" -    labels: -      component: support -      logging-infra: support -      provider: openshift -  when: openshift_logging_use_ops | bool -  changed_when: no - -- set_fact: es_key={{ lookup('file', openshift_logging_es_key) | b64encode }} -  when: -  - openshift_logging_es_key | trim | length > 0 -  - openshift_logging_es_allow_external | bool -  changed_when: false - -- set_fact: es_cert={{ lookup('file', openshift_logging_es_cert)| b64encode  }} -  when: -  - openshift_logging_es_cert | trim | length > 0 -  - openshift_logging_es_allow_external | bool -  changed_when: false - -- set_fact: es_ca={{ lookup('file', openshift_logging_es_ca_ext)| b64encode  }} -  when: -  - openshift_logging_es_ca_ext | trim | length > 0 -  - openshift_logging_es_allow_external | bool -  changed_when: false - -- set_fact: es_ca={{key_pairs | entry_from_named_pair('ca_file') }} -  when: -  - es_ca is not defined -  - openshift_logging_es_allow_external | bool -  changed_when: false - -- name: Generating Elasticsearch logging routes -  template: src=route_reencrypt.j2 
dest={{mktemp.stdout}}/templates/logging-logging-es-route.yaml -  tags: routes -  vars: -    obj_name: "logging-es" -    route_host: "{{openshift_logging_es_hostname}}" -    service_name: "logging-es" -    tls_key: "{{es_key | default('') | b64decode}}" -    tls_cert: "{{es_cert | default('') | b64decode}}" -    tls_ca_cert: "{{es_ca | b64decode}}" -    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" -    edge_term_policy: "{{openshift_logging_es_edge_term_policy | default('') }}" -    labels: -      component: support -      logging-infra: support -      provider: openshift -  changed_when: no -  when: openshift_logging_es_allow_external | bool - -- set_fact: es_ops_key={{ lookup('file', openshift_logging_es_ops_key) | b64encode }} -  when: -  - openshift_logging_es_ops_allow_external | bool -  - openshift_logging_use_ops | bool -  - "{{ openshift_logging_es_ops_key | trim | length > 0 }}" -  changed_when: false - -- set_fact: es_ops_cert={{ lookup('file', openshift_logging_es_ops_cert)| b64encode  }} -  when: -  - openshift_logging_es_ops_allow_external | bool -  - openshift_logging_use_ops | bool -  - "{{openshift_logging_es_ops_cert | trim | length > 0}}" -  changed_when: false - -- set_fact: es_ops_ca={{ lookup('file', openshift_logging_es_ops_ca_ext)| b64encode  }} -  when: -  - openshift_logging_es_ops_allow_external | bool -  - openshift_logging_use_ops | bool -  - "{{openshift_logging_es_ops_ca_ext | trim | length > 0}}" -  changed_when: false - -- set_fact: es_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }} -  when: -  - openshift_logging_es_ops_allow_external | bool -  - openshift_logging_use_ops | bool -  - es_ops_ca is not defined -  changed_when: false - -- name: Generating Elasticsearch logging ops routes -  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-es-ops-route.yaml -  tags: routes -  vars: -    obj_name: "logging-es-ops" -    route_host: "{{openshift_logging_es_ops_hostname}}" -    service_name: "logging-es-ops" -    tls_key: "{{es_ops_key | default('') | b64decode}}" -    tls_cert: "{{es_ops_cert | default('') | b64decode}}" -    tls_ca_cert: "{{es_ops_ca | b64decode}}" -    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" -    edge_term_policy: "{{openshift_logging_es_ops_edge_term_policy | default('') }}" -    labels: -      component: support -      logging-infra: support -      provider: openshift -  when: -  - openshift_logging_es_ops_allow_external | bool -  - openshift_logging_use_ops | bool -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml deleted file mode 100644 index b629bd995..000000000 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ /dev/null @@ -1,129 +0,0 @@ ---- -- name: Retrieving the cert to use when generating secrets for the logging components -  slurp: src="{{generated_certs_dir}}/{{item.file}}" -  register: key_pairs -  with_items: -    - { name: "ca_file", file: "ca.crt" } -    - { name: "kibana_key", file: "system.logging.kibana.key"} -    - { name: "kibana_cert", file: "system.logging.kibana.crt"} -    - { name: "curator_key", file: "system.logging.curator.key"} -    - { name: "curator_cert", file: "system.logging.curator.crt"} -    - { name: "fluentd_key", file: "system.logging.fluentd.key"} -    - { name: "fluentd_cert", file: "system.logging.fluentd.crt"} -    - { name: "kibana_internal_key", file: "kibana-internal.key"} -    - { 
name: "kibana_internal_cert", file: "kibana-internal.crt"} -    - { name: "server_tls", file: "server-tls.json"} - -- name: Generating secrets for logging components -  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml -  vars: -    secret_name: "logging-{{component}}" -    secret_key_file: "{{component}}_key" -    secret_cert_file: "{{component}}_cert" -    secrets: -      - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} -      - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} -      - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} -    secret_keys: ["ca", "cert", "key"] -  with_items: -    - kibana -    - curator -    - fluentd -  loop_control: -    loop_var: component -  check_mode: no -  changed_when: no - -- name: Retrieving the cert to use when generating secrets for mux -  slurp: src="{{generated_certs_dir}}/{{item.file}}" -  register: mux_key_pairs -  with_items: -    - { name: "ca_file", file: "ca.crt" } -    - { name: "mux_key", file: "system.logging.mux.key"} -    - { name: "mux_cert", file: "system.logging.mux.crt"} -    - { name: "mux_shared_key", file: "mux_shared_key"} -  when: openshift_logging_use_mux - -- name: Generating secrets for mux -  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml -  vars: -    secret_name: "logging-{{component}}" -    secret_key_file: "{{component}}_key" -    secret_cert_file: "{{component}}_cert" -    secrets: -      - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} -      - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} -      - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} -      - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"} -    secret_keys: ["ca", "cert", "key", "shared_key"] -  with_items: -    - mux -  loop_control: -    loop_var: component -  check_mode: no -  changed_when: no -  when: openshift_logging_use_mux - -- name: Generating secrets for kibana proxy -  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml -  vars: -    secret_name: logging-kibana-proxy -    secrets: -      - {key: oauth-secret, value: "{{oauth_secret}}"} -      - {key: session-secret, value: "{{session_secret}}"} -      - {key: server-key, value: "{{kibana_key_file}}"} -      - {key: server-cert, value: "{{kibana_cert_file}}"} -      - {key: server-tls.json, value: "{{server_tls_file}}"} -    secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"] -    kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}" -    kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}" -    server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}" -  check_mode: no -  changed_when: no - -- name: Generating secrets for elasticsearch -  command: > -    {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}} -    key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks -    searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks -    admin-key={{generated_certs_dir}}/system.admin.key 
admin-cert={{generated_certs_dir}}/system.admin.crt -    admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml -  vars: -    secret_name: logging-elasticsearch -    secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"] -  register: logging_es_secret -  check_mode: no -  changed_when: no - -- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml -  when: logging_es_secret.stdout is defined -  check_mode: no -  changed_when: no - -- name: Retrieving the cert to use when generating secrets for Elasticsearch external route -  slurp: src="{{generated_certs_dir}}/{{item.file}}" -  register: es_key_pairs -  with_items: -    - { name: "ca_file", file: "ca.crt" } -    - { name: "es_key", file: "system.logging.es.key"} -    - { name: "es_cert", file: "system.logging.es.crt"} -  when: openshift_logging_es_allow_external | bool - -- name: Generating secrets for Elasticsearch external route -  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml -  vars: -    secret_name: "logging-{{component}}" -    secret_key_file: "{{component}}_key" -    secret_cert_file: "{{component}}_cert" -    secrets: -      - {key: ca, value: "{{es_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} -      - {key: key, value: "{{es_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} -      - {key: cert, value: "{{es_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} -    secret_keys: ["ca", "cert", "key"] -  with_items: -    - es -  loop_control: -    loop_var: component -  check_mode: no -  changed_when: no -  when: openshift_logging_es_allow_external | bool diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml deleted file mode 100644 index 21bcdfecb..000000000 --- a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Generating serviceaccounts -  template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml -  vars: -    obj_name: aggregated-logging-{{component}} -  with_items: -    - elasticsearch -    - kibana -    - fluentd -    - curator -  loop_control: -    loop_var: component -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml deleted file mode 100644 index e3a5c5eb3..000000000 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ /dev/null @@ -1,119 +0,0 @@ ---- -- name: Generating logging-es service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml -  vars: -    obj_name: logging-es -    ports: -    - {port: 9200, targetPort: restapi} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: es -  check_mode: no -  changed_when: no - -- name: Generating logging-es-cluster service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml -  vars: -    obj_name: logging-es-cluster -    ports: -    - {port: 9300} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: es -  check_mode: no -  changed_when: no - -- name: Generating logging-kibana service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml -  vars: -    
obj_name: logging-kibana -    ports: -    - {port: 443, targetPort: oaproxy} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: kibana -  check_mode: no -  changed_when: no - -- name: Generating logging-es-ops service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml -  vars: -    obj_name: logging-es-ops -    ports: -    - {port: 9200, targetPort: restapi} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: es-ops -  when: openshift_logging_use_ops | bool -  check_mode: no -  changed_when: no - -- name: Generating logging-es-ops-cluster service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml -  vars: -    obj_name: logging-es-ops-cluster -    ports: -    - {port: 9300} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: es-ops -  when: openshift_logging_use_ops | bool -  check_mode: no -  changed_when: no - -- name: Generating logging-kibana-ops service -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml -  vars: -    obj_name: logging-kibana-ops -    ports: -    - {port: 443, targetPort: oaproxy} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: kibana-ops -  when: openshift_logging_use_ops | bool -  check_mode: no -  changed_when: no - -- name: Generating logging-mux service for external connections -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml -  vars: -    obj_name: logging-mux -    ports: -    - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: mux -    externalIPs: -    - "{{ ansible_eth0.ipv4.address }}" -  check_mode: no -  changed_when: no -  when: openshift_logging_mux_allow_external - -- name: Generating logging-mux service for intra-cluster connections -  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml -  vars: -    obj_name: logging-mux -    ports: -    - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} -    labels: -      logging-infra: support -    selector: -      provider: openshift -      component: mux -  check_mode: no -  changed_when: no -  when: openshift_logging_use_mux and not openshift_logging_mux_allow_external diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml deleted file mode 100644 index ab8e207f1..000000000 --- a/roles/openshift_logging/tasks/install_curator.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Check Curator current replica count -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator -    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} -  register: curator_replica_count -  when: not ansible_check_mode -  ignore_errors: yes -  changed_when: no - -- name: Check Curator ops current replica count -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops -    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} -  register: curator_ops_replica_count -  when: -    - not ansible_check_mode -    - openshift_logging_use_ops | bool -  ignore_errors: yes -  
changed_when: no - -- name: Generate curator deploymentconfig -  template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml -  vars: -    component: curator -    logging_component: curator -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" -    es_host: logging-es -    es_port: "{{openshift_logging_es_port}}" -    curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}" -    curator_memory_limit: "{{openshift_logging_curator_memory_limit }}" -    replicas: "{{curator_replica_count.stdout | default (0)}}" -    curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" -  check_mode: no -  changed_when: no - -- name: Generate OPS curator deploymentconfig -  template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml -  vars: -    component: curator-ops -    logging_component: curator -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" -    es_host: logging-es-ops -    es_port: "{{openshift_logging_es_ops_port}}" -    curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}" -    curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}" -    replicas: "{{curator_ops_replica_count.stdout | default (0)}}" -    curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}" -  when: openshift_logging_use_ops | bool -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml deleted file mode 100644 index a981e7f7f..000000000 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ /dev/null @@ -1,118 +0,0 @@ ---- -- name: Getting current ES deployment size -  set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }} - -- set_fact: openshift_logging_es_pvc_prefix="logging-es" -  when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == '' - -- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }} -  with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} - -### evaluate if the PVC attached to the dc currently matches the provided vars -## if it does then we reuse that pvc in the DC -- include: set_es_storage.yaml -  vars: -    es_component: es -    es_name: "{{ deployment.0 }}" -    es_spec: "{{ deployment.1 }}" -    es_pvc_count: "{{ deployment.2 | int }}" -    es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" -    es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}" -    es_pvc_size: "{{ openshift_logging_es_pvc_size }}" -    es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" -    es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" -    es_pv_selector: "{{ openshift_logging_es_pv_selector }}" -    es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" -    es_memory_limit: "{{ openshift_logging_es_memory_limit }}" -  with_together: -  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}" -  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" -  - "{{ es_indices | default([]) }}" -  loop_control: -    loop_var: deployment -## if it does not then we should create one that does and attach it - -## create new dc/pvc is needed -- 
include: set_es_storage.yaml -  vars: -    es_component: es -    es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" -    es_spec: "{}" -    es_pvc_count: "{{ item | int - 1 }}" -    es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" -    es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}" -    es_pvc_size: "{{ openshift_logging_es_pvc_size }}" -    es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" -    es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" -    es_pv_selector: "{{ openshift_logging_es_pv_selector }}" -    es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" -    es_memory_limit: "{{ openshift_logging_es_memory_limit }}" -  with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }} - -# --------- Tasks for Operation clusters --------- - -- name: Getting current ES deployment size -  set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }} - -- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}" - -- name: Validate Elasticsearch cluster size for Ops -  fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed" -  vars: -    es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}" -    cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" -  when: -  - openshift_logging_use_ops | bool -  - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}" -  check_mode: no - -- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops" -  when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == '' - -- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }} -  with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} -  when: -  - openshift_logging_use_ops | bool - -- include: set_es_storage.yaml -  vars: -    es_component: es-ops -    es_name: "{{ deployment.0 }}" -    es_spec: "{{ deployment.1 }}" -    es_pvc_count: "{{ deployment.2 | int }}" -    es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" -    es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}" -    es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" -    es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" -    es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" -    es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" -    es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" -    es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" -  with_together: -  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}" -  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" -  - "{{ es_ops_indices | default([]) }}" -  loop_control: -    loop_var: deployment -  when: -  - openshift_logging_use_ops | bool -## if it does not then we should create one that does and attach it - -## create new dc/pvc is needed -- include: set_es_storage.yaml -  vars: -    es_component: es-ops -    es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" - 
   es_spec: "{}" -    es_pvc_count: "{{ item | int - 1 }}" -    es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" -    es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}" -    es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" -    es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" -    es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" -    es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" -    es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" -    es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" -  with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }} -  when: -  - openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml deleted file mode 100644 index 6bc405819..000000000 --- a/roles/openshift_logging/tasks/install_fluentd.yaml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} -  check_mode: no - -- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} -  check_mode: no - -- name: Generating Fluentd daemonset -  template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml -  vars: -    daemonset_name: logging-fluentd -    daemonset_component: fluentd -    daemonset_container_name: fluentd-elasticsearch -    daemonset_serviceAccount: aggregated-logging-fluentd -    ops_host: "{{ fluentd_ops_host }}" -    ops_port: "{{ fluentd_ops_port }}" -    fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}" -    fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}" -  check_mode: no -  changed_when: no - -- name: "Check fluentd privileged permissions" -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig -    get scc/privileged -o jsonpath='{.users}' -  register: fluentd_privileged -  check_mode: no -  changed_when: no - -- name: "Set privileged permissions for fluentd" -  command: > -    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy -    add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd -  register: fluentd_output -  failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr -  check_mode: no -  when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 - -- name: "Check fluentd cluster-reader permissions" -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig -    get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' -  register: fluentd_cluster_reader -  check_mode: no -  changed_when: no - -- name: "Set cluster-reader permissions for fluentd" -  command: > -    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy -    add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd -  register: fluentd2_output -  failed_when: fluentd2_output.rc == 1 and 
'exists' not in fluentd2_output.stderr -  check_mode: no -  when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml deleted file mode 100644 index 52bdeb50d..000000000 --- a/roles/openshift_logging/tasks/install_kibana.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- name: Check Kibana current replica count -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana -    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} -  register: kibana_replica_count -  when: not ansible_check_mode -  ignore_errors: yes -  changed_when: no - -- name: Check Kibana ops current replica count -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops -    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} -  register: kibana_ops_replica_count -  when: -    - not ansible_check_mode -    - openshift_logging_use_ops | bool -  ignore_errors: yes -  changed_when: no - - -- name: Generate kibana deploymentconfig -  template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml -  vars: -    component: kibana -    logging_component: kibana -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" -    proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" -    es_host: logging-es -    es_port: "{{openshift_logging_es_port}}" -    kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}" -    kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}" -    kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}" -    kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}" -    replicas: "{{kibana_replica_count.stdout | default (0)}}" -    kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}" -  check_mode: no -  changed_when: no - -- name: Generate OPS kibana deploymentconfig -  template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml -  vars: -    component: kibana-ops -    logging_component: kibana -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" -    proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" -    es_host: logging-es-ops -    es_port: "{{openshift_logging_es_ops_port}}" -    kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}" -    kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}" -    kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" -    kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" -    replicas: "{{kibana_ops_replica_count.stdout | default (0)}}" -    kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}" -  when: openshift_logging_use_ops | bool -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index aec455c22..f2d757294 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -2,89 
+2,242 @@  - name: Gather OpenShift Logging Facts    openshift_logging_facts:      oc_bin: "{{openshift.common.client_binary}}" -    admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"      openshift_logging_namespace: "{{openshift_logging_namespace}}" -  tags: logging_facts + +- name: Set logging project +  oc_project: +    state: present +    name: "{{ openshift_logging_namespace }}" +    node_selector: "{{ openshift_logging_nodeselector | default(null) }}" + +- name: Labelling logging project +  oc_label: +    state: present +    kind: namespace +    name: "{{ openshift_logging_namespace }}" +    labels: +    - key: "{{ item.key }}" +      value: "{{ item.value }}" +  with_dict: "{{ openshift_logging_labels | default({}) }}" +  when: +  - openshift_logging_labels is defined +  - openshift_logging_labels is dict + +- name: Labelling logging project +  oc_label: +    state: present +    kind: namespace +    name: "{{ openshift_logging_namespace }}" +    labels: +    - key: "{{ openshift_logging_label_key }}" +      value: "{{ openshift_logging_label_value }}" +  when: +  - openshift_logging_label_key is defined +  - openshift_logging_label_key != "" +  - openshift_logging_label_value is defined + +- name: Create logging cert directory +  file: +    path: "{{ openshift.common.config_base }}/logging" +    state: directory +    mode: 0755 +  changed_when: False    check_mode: no -- name: Validate Elasticsearch cluster size -  fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this." -  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int - -- name: Validate Elasticsearch Ops cluster size -  fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this." 
-  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int - -- name: Install logging -  include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml" -  when: openshift_hosted_logging_install | default(true) | bool -  with_items: -    - support -    - elasticsearch -    - kibana -    - curator -    - fluentd -  loop_control: -    loop_var: install_component - -- name: Install logging mux -  include: "{{ role_path }}/tasks/install_mux.yaml" -  when: openshift_logging_use_mux - -- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml -  register: object_def_files -  changed_when: no - -- slurp: src={{item}} -  register: object_defs -  with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}" -  changed_when: no - -- name: Create objects -  include: oc_apply.yaml +- include: generate_certs.yaml    vars: -    - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" -    - namespace: "{{ openshift_logging_namespace }}" -    - file_name: "{{ file.source }}" -    - file_content: "{{ file.content | b64decode | from_yaml }}" -  with_items: "{{ object_defs.results }}" -  loop_control: -    loop_var: file -  when: not ansible_check_mode +    generated_certs_dir: "{{openshift.common.config_base}}/logging" -- include: update_master_config.yaml +## Elasticsearch + +- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }} +  with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} +  when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0 + +- set_fact: es_indices=[] +  when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count == 0 + +# We don't allow scaling down of ES nodes currently +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" + +    openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" +    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + +  with_together: +  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}" +  - "{{ openshift_logging_facts.elasticsearch.pvcs }}" +  - "{{ es_indices }}" + +# Create any new DC that may be required +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}" +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" + +    openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" 
+    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + +  with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} + +- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }} +  with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} +  when: +  - openshift_logging_use_ops | bool +  - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0 + +- set_fact: es_ops_indices=[] +  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count == 0 + + +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}" +    openshift_logging_elasticsearch_ops_deployment: true +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" + +    #openshift_logging_elasticsearch_storage_type: "{{ }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" +    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" -- name: Printing out objects to create -  debug: msg={{file.content | b64decode }} -  with_items: "{{ object_defs.results }}" -  loop_control: -    loop_var: file -  when: ansible_check_mode - -  # TODO replace task with oc_secret module that supports -  # linking when available -- name: Link Pull Secrets With Service Accounts -  include: oc_secret.yaml +  with_together: +  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" +  - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" +  - "{{ es_ops_indices }}" +  when: +  - openshift_logging_use_ops | bool + +# Create any new DC that may be required +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}" +    openshift_logging_elasticsearch_ops_deployment: true +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" + +    openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" +    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + +  with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} +  when: +  - openshift_logging_use_ops | bool + + +## Kibana +- include_role: +    name: openshift_logging_kibana +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" +    
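The Elasticsearch portion of the new install_logging.yaml above pairs each existing deploymentconfig with its recorded PVC and a zero-based index (es_indices), then adds enough extra deployments to reach the requested cluster size. A minimal Python sketch of that bookkeeping, under the assumption that the facts arrive as plain lists; the function and argument names here are illustrative and not variables from the role:

    # Sketch of the set_fact/with_together/with_sequence bookkeeping above.
    # Names are illustrative only; the role works on openshift_logging_facts.
    def plan_es_deployments(existing_dcs, existing_pvcs, cluster_size, pvc_prefix):
        """Return (reused, new) PVC assignments: keep a recorded PVC when one exists,
        otherwise fall back to '<prefix>-<index>', then top up to cluster_size."""
        # es_indices: one zero-based index per existing deploymentconfig
        indices = list(range(len(existing_dcs)))

        reused = []
        for dc, pvc, idx in zip(existing_dcs, existing_pvcs, indices):
            # mirrors: pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1
            reused.append((dc, pvc if pvc is not None else "%s-%d" % (pvc_prefix, idx)))

        # with_sequence count = desired cluster size minus existing DC count
        new = []
        for n in range(1, cluster_size - len(existing_dcs) + 1):
            # mirrors: pvc_prefix-{{ item | int + existing DC count - 1 }}
            new.append("%s-%d" % (pvc_prefix, n + len(existing_dcs) - 1))
        return reused, new

    print(plan_es_deployments(["logging-es-abc"], [None], 3, "logging-es"))
    # ([('logging-es-abc', 'logging-es-0')], ['logging-es-1', 'logging-es-2'])

The same arithmetic is repeated for the ops cluster with the _ops facts and openshift_logging_es_ops_cluster_size.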
openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" +    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}" +    openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}" +    openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}" +    openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" + + +- include_role: +    name: openshift_logging_kibana +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_kibana_ops_deployment: true +    openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" +    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" +    openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}" +    openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" +    openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}" +    openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}" +    openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}" +    openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}" +    openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}" +    openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}" +    openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}" +    openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}" +    openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}" +    openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}" +    openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}" +  when: +  - openshift_logging_use_ops | bool + + +## Curator +- include_role: +    name: openshift_logging_curator +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" + +- include_role: +    name: openshift_logging_curator +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_curator_ops_deployment: true +    openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" +    
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" +    openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}" +    openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" +    openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}" +  when: +  - openshift_logging_use_ops | bool + +## Mux +- include_role: +    name: openshift_logging_mux    vars: -    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" -    subcommand: link -    service_account: "{{sa_account}}" -    secret_name: "{{openshift_logging_image_pull_secret}}" -    add_args: "--for=pull" -  with_items: -    - default -    - aggregated-logging-elasticsearch -    - aggregated-logging-kibana -    - aggregated-logging-fluentd -    - aggregated-logging-curator -  register: link_pull_secret -  loop_control: -    loop_var: sa_account +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}" +    openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"    when: -    - openshift_logging_image_pull_secret is defined -    - openshift_logging_image_pull_secret != '' -  failed_when: link_pull_secret.rc != 0 +  - openshift_logging_use_mux | bool + -- name: Scaling up cluster -  include: start_cluster.yaml -  when: start_cluster | default(true) | bool +## Fluentd +- include_role: +    name: openshift_logging_fluentd +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}" +    openshift_logging_fluentd_use_journal: "{{ openshift.docker.options | search('journald') }}" +    openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" +    openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" + +- include: update_master_config.yaml diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml deleted file mode 100644 index 91eeb95a1..000000000 --- a/roles/openshift_logging/tasks/install_mux.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} -  check_mode: no - -- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} -  check_mode: no - -- name: Check mux current replica count -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux -    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} -  register: mux_replica_count -  when: not ansible_check_mode -  ignore_errors: yes -  
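Both the mux and fluentd role includes above select their ops endpoint with the same Jinja ternary filter, and fluentd infers journald usage by searching the docker options string. A small Python sketch of those two expressions; the function and argument names are hypothetical, only the logic mirrors the role vars:

    # Sketch of the two Jinja expressions used when wiring mux/fluentd above.
    def ops_host(use_ops):
        # mirrors: (openshift_logging_use_ops | bool) | ternary('logging-es-ops', 'logging-es')
        return "logging-es-ops" if use_ops else "logging-es"

    def uses_journald(docker_options):
        # mirrors: openshift.docker.options | search('journald'),
        # approximated here as a plain substring check
        return "journald" in docker_options

    print(ops_host(True), uses_journald("--log-driver=journald"))
    # -> logging-es-ops True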
changed_when: no - -- name: Generating mux deploymentconfig -  template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml -  vars: -    component: mux -    logging_component: mux -    deploy_name: "logging-{{component}}" -    image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}" -    es_host: logging-es -    es_port: "{{openshift_logging_es_port}}" -    ops_host: "{{ mux_ops_host }}" -    ops_port: "{{ mux_ops_port }}" -    mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}" -    mux_memory_limit: "{{openshift_logging_mux_memory_limit}}" -    replicas: "{{mux_replica_count.stdout | default (0)}}" -    mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}" -  check_mode: no -  changed_when: no - -- name: "Check mux hostmount-anyuid permissions" -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig -    get scc/hostmount-anyuid -o jsonpath='{.users}' -  register: mux_hostmount_anyuid -  check_mode: no -  changed_when: no - -- name: "Set hostmount-anyuid permissions for mux" -  command: > -    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy -    add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd -  register: mux_output -  failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr -  check_mode: no -  when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 - -- name: "Check mux cluster-reader permissions" -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig -    get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' -  register: mux_cluster_reader -  check_mode: no -  changed_when: no - -- name: "Set cluster-reader permissions for mux" -  command: > -    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy -    add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd -  register: mux2_output -  failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr -  check_mode: no -  when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml deleted file mode 100644 index 877ce3149..000000000 --- a/roles/openshift_logging/tasks/install_support.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -# This is the base configuration for installing the other components -- name: Set logging project -  oc_project: -    state: present -    name: "{{ openshift_logging_namespace }}" -    node_selector: "{{ openshift_logging_nodeselector | default(null) }}" - -- name: Labelling logging project -  oc_label: -    state: present -    kind: namespace -    name: "{{ openshift_logging_namespace }}" -    labels: -    - key: "{{ item.key }}" -      value: "{{ item.value }}" -  with_dict: "{{ openshift_logging_labels | default({}) }}" -  when: -  - openshift_logging_labels is defined -  - openshift_logging_labels is dict - -- name: Labelling logging project -  oc_label: -    state: present -    kind: namespace -    name: "{{ openshift_logging_namespace }}" -    labels: -    - key: "{{ openshift_logging_label_key }}" -      value: "{{ openshift_logging_label_value }}" -  when: -  - 
openshift_logging_label_key is defined -  - openshift_logging_label_key != "" -  - openshift_logging_label_value is defined - -- name: Create logging cert directory -  file: path={{openshift.common.config_base}}/logging state=directory mode=0755 -  changed_when: False -  check_mode: no - -- include: generate_certs.yaml -  vars: -    generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- name: Create temp directory for all our templates -  file: path={{mktemp.stdout}}/templates state=directory mode=0755 -  changed_when: False -  check_mode: no - -- include: generate_secrets.yaml -  vars: -    generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- include: generate_configmaps.yaml - -- include: generate_services.yaml - -- name: Generate kibana-proxy oauth client -  template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml -  vars: -    secret: "{{oauth_secret}}" -  when: oauth_secret is defined -  check_mode: no -  changed_when: no - -- include: generate_clusterroles.yaml - -- include: generate_rolebindings.yaml - -- include: generate_clusterrolebindings.yaml - -- include: generate_serviceaccounts.yaml - -- include: generate_routes.yaml diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 3d8cd3410..f475024dd 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -30,33 +30,12 @@    check_mode: no    become: no -- debug: msg="Created local temp dir {{local_tmp.stdout}}" - -- name: Copy the admin client config(s) -  command: > -    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig -  changed_when: False -  check_mode: no -  tags: logging_init -  - include: "{{ role_path }}/tasks/install_logging.yaml"    when: openshift_logging_install_logging | default(false) | bool -- include: "{{ role_path }}/tasks/upgrade_logging.yaml" -  when: openshift_logging_upgrade_logging | default(false) | bool -  - include: "{{ role_path }}/tasks/delete_logging.yaml"    when:      - not openshift_logging_install_logging | default(false) | bool -    - not openshift_logging_upgrade_logging | default(false) | bool - -- name: Delete temp directory -  file: -    name: "{{ mktemp.stdout }}" -    state: absent -  tags: logging_cleanup -  changed_when: False -  check_mode: no  - name: Cleaning up local temp dir    local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml deleted file mode 100644 index a0ed56ebd..000000000 --- a/roles/openshift_logging/tasks/oc_apply.yaml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- oc_obj: -    kind: "{{ file_content.kind }}" -    name: "{{ file_content.metadata.name }}" -    state: present -    namespace: "{{ namespace }}" -    files: -    - "{{ file_name }}" -  when: file_content.kind not in ["Service", "Route"] - -## still need to do this for services until the template logic is replaced by oc_* -- block: -  - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} -    command: > -      {{ openshift.common.client_binary }} -      --config={{ kubeconfig }} -      get {{file_content.kind}} {{file_content.metadata.name}} -      -o jsonpath='{.metadata.resourceVersion}' -      -n {{namespace}} -    register: generation_init -    failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" -    changed_when: no - -  - name: Applying {{file_name}} -    
command: > -      {{ openshift.common.client_binary }} --config={{ kubeconfig }} -      apply -f {{ file_name }} -      -n {{ namespace }} -    register: generation_apply -    failed_when: "'error' in generation_apply.stderr" -    changed_when: no - -  - name: Removing previous {{file_name}} -    command: > -      {{ openshift.common.client_binary }} --config={{ kubeconfig }} -      delete -f {{ file_name }} -      -n {{ namespace }} -    register: generation_delete -    failed_when: "'error' in generation_delete.stderr" -    changed_when: generation_delete.rc == 0 -    when: "'field is immutable' in generation_apply.stderr" - -  - name: Recreating {{file_name}} -    command: > -      {{ openshift.common.client_binary }} --config={{ kubeconfig }} -      apply -f {{ file_name }} -      -n {{ namespace }} -    register: generation_apply -    failed_when: "'error' in generation_apply.stderr" -    changed_when: generation_apply.rc == 0 -    when: "'field is immutable' in generation_apply.stderr" -  when: file_content.kind in ["Service", "Route"] diff --git a/roles/openshift_logging/tasks/oc_secret.yaml b/roles/openshift_logging/tasks/oc_secret.yaml deleted file mode 100644 index de37e4f6d..000000000 --- a/roles/openshift_logging/tasks/oc_secret.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- command: > -    {{ openshift.common.client_binary }} -    --config={{ kubeconfig }} -    secret {{subcommand}} {{service_account}} {{secret_name}} -    {{add_args}} -    -n {{openshift_logging_namespace}} diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml deleted file mode 100644 index 4afe4e641..000000000 --- a/roles/openshift_logging/tasks/set_es_storage.yaml +++ /dev/null @@ -1,80 +0,0 @@ ---- -- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}" -  when: es_spec.volumes is defined - -- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}" -  when: -  - es_spec.volumes is defined -  - es_storage_type.persistentVolumeClaim is defined - -- set_fact: es_storage_claim="" -  when: -  - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined - -## take an ES dc and evaluate its storage option -# if it is a hostmount or emptydir we don't do anything with it -# if its a pvc we see if the corresponding pvc matches the provided specs (if they exist) -- oc_obj: -    state: list -    kind: pvc -    name: "{{ es_storage_claim }}" -    namespace: "{{ openshift_logging_namespace }}" -  register: pvc_spec -  failed_when: pvc_spec.results.stderr is defined -  when: -  - es_spec.volumes is defined -  - es_storage_type.persistentVolumeClaim is defined - -- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}" -  when: -  - pvc_spec.results is defined -  - pvc_spec.results.results[0].spec is defined - -# if not create the pvc and use it -- block: - -  - name: Generating PersistentVolumeClaims -    template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml -    vars: -      obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" -      size: "{{ es_pvc_size }}" -      access_modes: "{{ openshift_logging_storage_access_modes }}" -      pv_selector: "{{ es_pv_selector }}" -    when: not es_pvc_dynamic | bool -    check_mode: no -    changed_when: no - -  - name: Generating PersistentVolumeClaims - Dynamic -    template: src=pvc.j2 
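The deleted oc_apply.yaml above implemented an apply-or-recreate fallback for Services and Routes: apply the file, and if the server rejects it with a "field is immutable" error, delete the object and apply the same file again. A rough Python equivalent of that control flow; the oc invocation details are simplified and should be treated as illustrative:

    import subprocess

    def apply_or_recreate(oc, kubeconfig, path, namespace):
        """Apply a definition; on an immutable-field rejection, delete it and apply again.
        Mirrors the deleted oc_apply.yaml flow; error handling is simplified."""
        def run(*args):
            return subprocess.run(
                [oc, "--config=%s" % kubeconfig, *args, "-n", namespace],
                capture_output=True, text=True)

        result = run("apply", "-f", path)
        if "field is immutable" in result.stderr:
            run("delete", "-f", path)          # remove the immutable object first
            result = run("apply", "-f", path)  # then recreate it from the same file
        return result.returncode

    # e.g. apply_or_recreate("oc", "/tmp/admin.kubeconfig", "logging-es-svc.yaml", "logging")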
dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml -    vars: -      obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" -      annotations: -        volume.alpha.kubernetes.io/storage-class: "dynamic" -      size: "{{ es_pvc_size }}" -      access_modes: "{{ openshift_logging_storage_access_modes }}" -      pv_selector: "{{ es_pv_selector }}" -    when: es_pvc_dynamic | bool -    check_mode: no -    changed_when: no - -  - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" - -  when: -  - es_pvc_size | search('^\d.*') -  - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) ) - -- name: Generate Elasticsearch DeploymentConfig -  template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml -  vars: -    component: "{{ es_component }}" -    deploy_name: "{{ es_name }}" -    logging_component: elasticsearch -    deploy_name_prefix: "logging-{{ es_component }}" -    image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" -    es_cluster_name: "{{component}}" -    es_cpu_limit: "{{ es_cpu_limit }}" -    es_memory_limit: "{{ es_memory_limit }}" -    es_node_selector: "{{ es_node_selector }}" -    es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}" -  check_mode: no -  changed_when: no diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml deleted file mode 100644 index c1592b830..000000000 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ /dev/null @@ -1,156 +0,0 @@ ---- -- name: Retrieve list of fluentd hosts -  oc_obj: -    state: list -    kind: node -  when: "'--all' in openshift_logging_fluentd_hosts" -  register: fluentd_hosts - -- name: Set fact openshift_logging_fluentd_hosts -  set_fact: -    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  when: "'--all' in openshift_logging_fluentd_hosts" - -- name: start fluentd -  oc_label: -    name: "{{ fluentd_host }}" -    kind: node -    state: add -    labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" -  with_items: "{{ openshift_logging_fluentd_hosts }}" -  loop_control: -    loop_var: fluentd_host - -- name: Retrieve mux -  oc_obj: -    state: list -    kind: dc -    selector: "component=mux" -    namespace: "{{openshift_logging_namespace}}" -  register: mux_dc -  when: openshift_logging_use_mux - -- name: start mux -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: "{{ openshift_logging_mux_replica_count | default (1) }}" -  with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" -  loop_control: -    loop_var: object -  when: -  - mux_dc.results is defined -  - mux_dc.results.results is defined -  - openshift_logging_use_mux - -- name: Retrieve elasticsearch -  oc_obj: -    state: list -    kind: dc -    selector: "component=es" -    namespace: "{{openshift_logging_namespace}}" -  register: es_dc - -- name: start elasticsearch -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 1 -  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') 
| list }}" -  loop_control: -    loop_var: object - -- name: Retrieve kibana -  oc_obj: -    state: list -    kind: dc -    selector: "component=kibana" -    namespace: "{{openshift_logging_namespace}}" -  register: kibana_dc - -- name: start kibana -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}" -  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Retrieve curator -  oc_obj: -    state: list -    kind: dc -    selector: "component=curator" -    namespace: "{{openshift_logging_namespace}}" -  register: curator_dc - -- name: start curator -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 1 -  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Retrieve elasticsearch-ops -  oc_obj: -    state: list -    kind: dc -    selector: "component=es-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: es_dc - -- name: start elasticsearch-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 1 -  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool - -- name: Retrieve kibana-ops -  oc_obj: -    state: list -    kind: dc -    selector: "component=kibana-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: kibana_dc - -- name: start kibana-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}" -  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool - -- name: Retrieve curator -  oc_obj: -    state: list -    kind: dc -    selector: "component=curator-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: curator_dc - -- name: start curator-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 1 -  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml deleted file mode 100644 index f4b419d84..000000000 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ /dev/null @@ -1,153 +0,0 @@ ---- -- name: Retrieve list of fluentd hosts -  oc_obj: -    state: list -    kind: node -  when: "'--all' in openshift_logging_fluentd_hosts" -  register: fluentd_hosts - -- name: Set fact openshift_logging_fluentd_hosts -  set_fact: -    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  when: "'--all' in openshift_logging_fluentd_hosts" - -- name: stop fluentd -  oc_label: -    name: "{{ fluentd_host }}" -    kind: node -    state: absent -    labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" -  with_items: "{{ openshift_logging_fluentd_hosts }}" - 
 loop_control: -    loop_var: fluentd_host - -- name: Retrieve mux -  oc_obj: -    state: list -    kind: dc -    selector: "component=mux" -    namespace: "{{openshift_logging_namespace}}" -  register: mux_dc -  when: openshift_logging_use_mux - -- name: stop mux -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_mux - -- name: Retrieve elasticsearch -  oc_obj: -    state: list -    kind: dc -    selector: "component=es" -    namespace: "{{openshift_logging_namespace}}" -  register: es_dc - -- name: stop elasticsearch -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Retrieve kibana -  oc_obj: -    state: list -    kind: dc -    selector: "component=kibana" -    namespace: "{{openshift_logging_namespace}}" -  register: kibana_dc - -- name: stop kibana -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Retrieve curator -  oc_obj: -    state: list -    kind: dc -    selector: "component=curator" -    namespace: "{{openshift_logging_namespace}}" -  register: curator_dc - -- name: stop curator -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Retrieve elasticsearch-ops -  oc_obj: -    state: list -    kind: dc -    selector: "component=es-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: es_dc - -- name: stop elasticsearch-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool - -- name: Retrieve kibana-ops -  oc_obj: -    state: list -    kind: dc -    selector: "component=kibana-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: kibana_dc - -- name: stop kibana-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool - -- name: Retrieve curator -  oc_obj: -    state: list -    kind: dc -    selector: "component=curator-ops" -    namespace: "{{openshift_logging_namespace}}" -  register: curator_dc - -- name: stop curator-ops -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 0 -  with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object -  when: openshift_logging_use_ops | bool diff --git 
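start_cluster.yaml and stop_cluster.yaml, both removed above, repeated one pattern per component: list deploymentconfigs by a component=<name> selector, then scale each to a target replica count (0 to stop). A condensed sketch of that loop, with placeholder callables standing in for the oc_obj and oc_scale modules:

    # Condensed form of the per-component loop in the deleted start/stop_cluster tasks.
    # list_dcs and scale_dc are placeholders, not real module bindings.
    def scale_components(list_dcs, scale_dc, namespace, targets):
        """targets maps a component selector to the desired replica count,
        e.g. {'es': 1, 'kibana': 1, 'curator': 1} to start, all zeros to stop."""
        for component, replicas in targets.items():
            for dc_name in list_dcs(namespace, selector="component=%s" % component):
                scale_dc(kind="dc", name=dc_name, namespace=namespace, replicas=replicas)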
a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml deleted file mode 100644 index 30fdbd2af..000000000 --- a/roles/openshift_logging/tasks/upgrade_logging.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: Stop the Cluster -  include: stop_cluster.yaml - -- name: Upgrade logging -  include: install_logging.yaml -  vars: -    start_cluster: False - -# start ES so that we can run migrate script -- name: Retrieve elasticsearch -  oc_obj: -    state: list -    kind: dc -    selector: "component=es" -    namespace: "{{openshift_logging_namespace}}" -  register: es_dc - -- name: start elasticsearch -  oc_scale: -    kind: dc -    name: "{{ object }}" -    namespace: "{{openshift_logging_namespace}}" -    replicas: 1 -  with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" -  loop_control: -    loop_var: object - -- name: Wait for pods to start -  oc_obj: -    state: list -    kind: pods -    selector: "component=es" -    namespace: "{{openshift_logging_namespace}}" -  register: running_pod -  until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', '^Running$') | map(attribute='metadata.name') | list | length != 0 -  retries: 30 -  delay: 10 - -- name: Run upgrade script -  script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}} -  register: script_output -  changed_when: -    - script_output.rc == 0 -    - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1 - -- name: Start up rest of cluster -  include: start_cluster.yaml diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2 deleted file mode 100644 index 0d28db48e..000000000 --- a/roles/openshift_logging/templates/clusterrole.j2 +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: ClusterRole -metadata: -  name: {{obj_name}} -rules: -{% for rule in rules %} -- resources: -{% for kind in rule.resources %} -    - {{ kind }} -{% endfor %} -  apiGroups: -{% if rule.api_groups is defined %} -{% for group in rule.api_groups %} -    - {{ group }} -{% endfor %} -{% endif %} -  verbs: -{% for verb in rule.verbs %} -    - {{ verb }} -{% endfor %} -{% endfor %} diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2 deleted file mode 100644 index 2d25ff1fb..000000000 --- a/roles/openshift_logging/templates/clusterrolebinding.j2 +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: ClusterRoleBinding -metadata: -  name: {{obj_name}} -{% if crb_usernames is defined %} -userNames: -{% for name in crb_usernames %} -  - {{ name }} -{% endfor %} -{% endif %} -{% if crb_groupnames is defined %} -groupNames: -{% for name in crb_groupnames %} -  - {{ name }} -{% endfor %} -{% endif %} -subjects: -{% for sub in subjects %} -  - kind: {{ sub.kind }} -    name: {{ sub.name }} -    namespace: {{sub.namespace}} -{% endfor %} -roleRef: -  name: {{obj_name}} diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial deleted file mode 100644 index ccd01a816..000000000 --- a/roles/openshift_logging/templates/es-storage-emptydir.partial +++ 
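The removed upgrade_logging.yaml gated the migration script on an "until" loop: re-check the component=es pods every 10 seconds, up to 30 times, until at least one reports phase Running. A small sketch of that polling, with get_pod_phases standing in for the oc_obj pod listing:

    import time

    def wait_for_running(get_pod_phases, retries=30, delay=10):
        """Poll until at least one pod reports phase 'Running', mirroring the
        retries/delay/until settings of the deleted upgrade task.
        get_pod_phases is a stand-in for the oc_obj pod lookup."""
        for _ in range(retries):
            if any(phase == "Running" for phase in get_pod_phases()):
                return True
            time.sleep(delay)
        return False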
/dev/null @@ -1 +0,0 @@ -          emptyDir: {} diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial deleted file mode 100644 index 07ddad9ba..000000000 --- a/roles/openshift_logging/templates/es-storage-hostpath.partial +++ /dev/null @@ -1,2 +0,0 @@ -          hostPath: -            path: {{es_storage['path']}} diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial deleted file mode 100644 index fcbff68de..000000000 --- a/roles/openshift_logging/templates/es-storage-pvc.partial +++ /dev/null @@ -1,2 +0,0 @@ -          persistentVolumeClaim: -            claimName: {{es_storage['pvc_claim']}} diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 deleted file mode 100644 index 5c93d823e..000000000 --- a/roles/openshift_logging/templates/fluentd.j2 +++ /dev/null @@ -1,167 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: "DaemonSet" -metadata: -  name: "{{daemonset_name}}" -  labels: -    provider: openshift -    component: "{{daemonset_component}}" -    logging-infra: "{{daemonset_component}}" -spec: -  selector: -    matchLabels: -      provider: openshift -      component: "{{daemonset_component}}" -  updateStrategy: -    type: RollingUpdate -    rollingUpdate: -      minReadySeconds: 600 -  template: -    metadata: -      name: "{{daemonset_container_name}}" -      labels: -        logging-infra: "{{daemonset_component}}" -        provider: openshift -        component: "{{daemonset_component}}" -    spec: -      serviceAccountName: "{{daemonset_serviceAccount}}" -      nodeSelector: -        {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}" -      containers: -      - name: "{{daemonset_container_name}}" -        image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}" -        imagePullPolicy: Always -        securityContext: -          privileged: true -        resources: -          limits: -            cpu: {{openshift_logging_fluentd_cpu_limit}} -            memory: {{openshift_logging_fluentd_memory_limit}} -        volumeMounts: -        - name: runlogjournal -          mountPath: /run/log/journal -        - name: varlog -          mountPath: /var/log -        - name: varlibdockercontainers -          mountPath: /var/lib/docker/containers -          readOnly: true -        - name: config -          mountPath: /etc/fluent/configs.d/user -          readOnly: true -        - name: certs -          mountPath: /etc/fluent/keys -          readOnly: true -        - name: dockerhostname -          mountPath: /etc/docker-hostname -          readOnly: true -        - name: localtime -          mountPath: /etc/localtime -          readOnly: true -        - name: dockercfg -          mountPath: /etc/sysconfig/docker -          readOnly: true -        - name: dockerdaemoncfg -          mountPath: /etc/docker -          readOnly: true -{% if openshift_logging_use_mux_client | bool %} -        - name: muxcerts -          mountPath: /etc/fluent/muxkeys -          readOnly: true -{% endif %} -        env: -        - name: "K8S_HOST_URL" -          value: "{{openshift_logging_master_url}}" -        - name: "ES_HOST" -          value: "{{openshift_logging_es_host}}" -        - name: "ES_PORT" -          value: "{{openshift_logging_es_port}}" -        - name: "ES_CLIENT_CERT" -          value: "{{openshift_logging_es_client_cert}}" -        - 
name: "ES_CLIENT_KEY" -          value: "{{openshift_logging_es_client_key}}" -        - name: "ES_CA" -          value: "{{openshift_logging_es_ca}}" -        - name: "OPS_HOST" -          value: "{{ops_host}}" -        - name: "OPS_PORT" -          value: "{{ops_port}}" -        - name: "OPS_CLIENT_CERT" -          value: "{{openshift_logging_es_ops_client_cert}}" -        - name: "OPS_CLIENT_KEY" -          value: "{{openshift_logging_es_ops_client_key}}" -        - name: "OPS_CA" -          value: "{{openshift_logging_es_ops_ca}}" -        - name: "ES_COPY" -          value: "{{openshift_logging_fluentd_es_copy|lower}}" -        - name: "ES_COPY_HOST" -          value: "{{es_copy_host | default('')}}" -        - name: "ES_COPY_PORT" -          value: "{{es_copy_port | default('')}}" -        - name: "ES_COPY_SCHEME" -          value: "{{es_copy_scheme | default('https')}}" -        - name: "ES_COPY_CLIENT_CERT" -          value: "{{es_copy_client_cert | default('')}}" -        - name: "ES_COPY_CLIENT_KEY" -          value: "{{es_copy_client_key | default('')}}" -        - name: "ES_COPY_CA" -          value: "{{es_copy_ca | default('')}}" -        - name: "ES_COPY_USERNAME" -          value: "{{es_copy_username | default('')}}" -        - name: "ES_COPY_PASSWORD" -          value: "{{es_copy_password | default('')}}" -        - name: "OPS_COPY_HOST" -          value: "{{ops_copy_host | default('')}}" -        - name: "OPS_COPY_PORT" -          value: "{{ops_copy_port | default('')}}" -        - name: "OPS_COPY_SCHEME" -          value: "{{ops_copy_scheme | default('https')}}" -        - name: "OPS_COPY_CLIENT_CERT" -          value: "{{ops_copy_client_cert | default('')}}" -        - name: "OPS_COPY_CLIENT_KEY" -          value: "{{ops_copy_client_key | default('')}}" -        - name: "OPS_COPY_CA" -          value: "{{ops_copy_ca | default('')}}" -        - name: "OPS_COPY_USERNAME" -          value: "{{ops_copy_username | default('')}}" -        - name: "OPS_COPY_PASSWORD" -          value: "{{ops_copy_password | default('')}}" -        - name: "USE_JOURNAL" -          value: "{{openshift_logging_fluentd_use_journal|lower}}" -        - name: "JOURNAL_SOURCE" -          value: "{{openshift_logging_fluentd_journal_source | default('')}}" -        - name: "JOURNAL_READ_FROM_HEAD" -          value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" -        - name: "USE_MUX_CLIENT" -          value: "{{openshift_logging_use_mux_client| default('false')}}" -      volumes: -      - name: runlogjournal -        hostPath: -          path: /run/log/journal -      - name: varlog -        hostPath: -          path: /var/log -      - name: varlibdockercontainers -        hostPath: -          path: /var/lib/docker/containers -      - name: config -        configMap: -          name: logging-fluentd -      - name: certs -        secret: -          secretName: logging-fluentd -      - name: dockerhostname -        hostPath: -          path: /etc/hostname -      - name: localtime -        hostPath: -          path: /etc/localtime -      - name: dockercfg -        hostPath: -          path: /etc/sysconfig/docker -      - name: dockerdaemoncfg -        hostPath: -          path: /etc/docker -{% if openshift_logging_use_mux_client | bool %} -      - name: muxcerts -        secret: -          secretName: logging-mux -{% endif %} diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2 deleted file mode 100644 index eba4197da..000000000 --- 
a/roles/openshift_logging/templates/secret.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: -  name: "{{secret_name}}" -type: Opaque -data: -{% for s in secrets %} -  "{{s.key}}" : "{{s.value | b64encode}}" -{% endfor %} diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2 deleted file mode 100644 index 70644a39c..000000000 --- a/roles/openshift_logging/templates/service.j2 +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: "v1" -kind: "Service" -metadata: -  name: "{{obj_name}}" -{% if labels is defined%} -  labels: -{% for key, value in labels.iteritems() %} -    {{key}}: {{value}} -{% endfor %} -{% endif %} -spec: -  ports: -{% for port in ports %} -  - -{% for key, value in port.iteritems() %} -    {{key}}: {{value}} -{% endfor %} -{% if port.targetPort is undefined %} -    clusterIP: "None" -{% endif %} -{% endfor %} -{% if service_targetPort is defined %} -    targetPort: {{service_targetPort}} -{% endif %} -  selector: -  {% for key, value in selector.iteritems() %} -  {{key}}: {{value}} -  {% endfor %} -{% if externalIPs is defined -%} -  externalIPs: -{% for ip in externalIPs %} -  - {{ ip }} -{% endfor %} -{% endif %} diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2 deleted file mode 100644 index b22acc594..000000000 --- a/roles/openshift_logging/templates/serviceaccount.j2 +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: -  name: {{obj_name}} -{% if labels is defined%} -  labels: -{% for key, value in labels.iteritems() %} -    {{key}}: {{value}} -{% endfor %} -{% endif %} -{% if secrets is defined %} -secrets: -{% for name in secrets %} -- name: {{ name }} -{% endfor %} -{% endif %} diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml new file mode 100644 index 000000000..82ffb2f93 --- /dev/null +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -0,0 +1,33 @@ +--- +### General logging settings +openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local" + +openshift_logging_curator_namespace: logging + +### Common settings +openshift_logging_curator_nodeselector: "" +openshift_logging_curator_cpu_limit: 100m +openshift_logging_curator_memory_limit: null + +openshift_logging_curator_es_host: "logging-es" +openshift_logging_curator_es_port: 9200 + +# This should not exceed 1, should check for this +openshift_logging_curator_replicas: 1 + +# this is used to determine if this is an operations deployment or a non-ops deployment +# simply used for naming purposes +openshift_logging_curator_ops_deployment: false + +openshift_logging_curator_default_days: 30 +openshift_logging_curator_run_hour: 0 +openshift_logging_curator_run_minute: 0 +openshift_logging_curator_run_timezone: UTC +openshift_logging_curator_script_log_level: INFO +openshift_logging_curator_log_level: ERROR + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#curator_config_contents: diff --git 
a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml index 8d62d8e7d..8d62d8e7d 100644 --- a/roles/openshift_logging/files/curator.yml +++ b/roles/openshift_logging_curator/files/curator.yml diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml new file mode 100644 index 000000000..6752fb7f9 --- /dev/null +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Curator Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml new file mode 100644 index 000000000..94f8b4a97 --- /dev/null +++ b/roles/openshift_logging_curator/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    curator_version: "{{ __latest_curator_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? +- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Curator +  when: curator_version not in __allowed_curator_versions diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml new file mode 100644 index 000000000..ae7e48caa --- /dev/null +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -0,0 +1,113 @@ +--- +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# service account +- name: Create Curator service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Curator service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# configmap +- copy: +    src: curator.yml +    dest: "{{ tempdir }}/curator.yml" +  when: curator_config_contents is undefined +  changed_when: no + +- copy: +    content: "{{ curator_config_contents }}" +    dest: "{{ tempdir }}/curator.yml" +  when: curator_config_contents is defined +  changed_when: no + +- name: Set Curator configmap +  oc_configmap: +    state: present +    name: "logging-curator" +    
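determine_version.yaml above turns an image tag such as "v3.6.0" into the internal "3_6" form with regex_replace and then validates it against the allowed list. The same transformation expressed in Python for reference; the constants mirror the role's __latest_curator_version and __allowed_curator_versions vars:

    import re

    ALLOWED = ["3_5", "3_6"]   # mirrors __allowed_curator_versions
    LATEST = "3_5"             # mirrors __latest_curator_version

    def curator_version(image_version):
        """Map an openshift_logging_image_version tag to the '3_<minor>' form used by the role."""
        if not image_version:
            raise ValueError("Missing version to install provided by 'openshift_logging_image_version'")
        if image_version == "latest":
            return LATEST
        version = re.sub(r"^v?(?P<major>\d)\.(?P<minor>\d).*$", r"3_\g<minor>", image_version)
        if version not in ALLOWED:
            raise ValueError("Invalid version specified for Curator")
        return version

    print(curator_version("v3.6.0"))  # -> 3_6

The Elasticsearch role further below applies the same pattern with its own __latest/__allowed version vars.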
namespace: "{{ openshift_logging_namespace }}" +    from_file: +      config.yaml: "{{ tempdir }}/curator.yml" + +# secret +- name: Set Curator secret +  oc_secret: +    state: present +    name: "logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.curator.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.curator.crt" + +- set_fact: +    curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" +    curator_component: "{{ 'curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" + +# DC +# TODO: scale should not exceed 1 +- name: Generate Curator deploymentconfig +  template: +    src: curator.j2 +    dest: "{{ tempdir }}/templates/curator-dc.yaml" +  vars: +    component: "{{ curator_component }}" +    logging_component: curator +    deploy_name: "{{ curator_name }}" +    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" +    es_host: "{{ openshift_logging_curator_es_host }}" +    es_port: "{{ openshift_logging_curator_es_port }}" +    curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" +    curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}" +    replicas: "{{ openshift_logging_curator_replicas | default (1) }}" +    curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" +  check_mode: no +  changed_when: no + +- name: Set Curator DC +  oc_obj: +    state: present +    name: "{{ curator_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/curator-dc.yaml" +    delete_after: true + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 index c6284166b..f8b84861f 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging_curator/templates/curator.j2 @@ -7,7 +7,7 @@ metadata:      component: "{{component}}"      logging-infra: "{{logging_component}}"  spec: -  replicas: {{replicas|default(0)}} +  replicas: {{replicas|default(1)}}    selector:      provider: openshift      component: "{{component}}" @@ -42,13 +42,13 @@ spec:            resources:              limits:                cpu: "{{curator_cpu_limit}}" -{% if curator_memory_limit is defined and curator_memory_limit is not none %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}                memory: "{{curator_memory_limit}}"  {% endif %}            env:              -                name: "K8S_HOST_URL" -              value: "{{openshift_logging_master_url}}" +              value: "{{openshift_logging_curator_master_url}}"              -                name: "ES_HOST"                value: "{{es_host}}" @@ -89,6 +89,9 @@ spec:              - name: config                mountPath: /etc/curator/settings                readOnly: true +            - name: elasticsearch-storage +              mountPath: /elasticsearch/persistent +              readOnly: true        volumes:          - name: certs            secret: @@ -96,3 +99,5 @@ spec:          - name: config            configMap:              name: 
logging-curator +        - name: elasticsearch-storage +          emptyDir: {} diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml new file mode 100644 index 000000000..97525479e --- /dev/null +++ b/roles/openshift_logging_curator/vars/main.yml @@ -0,0 +1,3 @@ +--- +__latest_curator_version: "3_5" +__allowed_curator_versions: ["3_5", "3_6"] diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml new file mode 100644 index 000000000..7923059da --- /dev/null +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -0,0 +1,57 @@ +--- +### Common settings +openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_elasticsearch_namespace: logging + +openshift_logging_elasticsearch_nodeselector: "" +openshift_logging_elasticsearch_cpu_limit: 100m +openshift_logging_elasticsearch_memory_limit: 512Mi +openshift_logging_elasticsearch_recover_after_time: 5m + +openshift_logging_elasticsearch_replica_count: 1 + +# ES deployment type +openshift_logging_elasticsearch_deployment_type: "data-master" + +# ES deployment name +openshift_logging_elasticsearch_deployment_name: "" + +# One of ['emptydir', 'pvc', 'hostmount'] +openshift_logging_elasticsearch_storage_type: "emptydir" + +# hostmount options +openshift_logging_elasticsearch_hostmount_path: "" + +# pvc options +# the name of the PVC we will bind to -- create it if it does not exist +openshift_logging_elasticsearch_pvc_name: "" + +# required if the PVC does not already exist +openshift_logging_elasticsearch_pvc_size: "" +openshift_logging_elasticsearch_pvc_dynamic: false +openshift_logging_elasticsearch_pvc_pv_selector: {} +openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] +openshift_logging_elasticsearch_storage_group: '65534' + +openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" + +# this is used to determine if this is an operations deployment or a non-ops deployment +# simply used for naming purposes +openshift_logging_elasticsearch_ops_deployment: false + +openshift_logging_elasticsearch_ops_allow_cluster_reader: false + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#es_logging_contents: +#es_config_contents: + + +openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_es_host: logging-es +openshift_logging_es_port: 9200 +openshift_logging_es_ca: /etc/fluent/keys/ca +openshift_logging_es_client_cert: /etc/fluent/keys/cert +openshift_logging_es_client_key: /etc/fluent/keys/key diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh index 339b5a1b2..339b5a1b2 100644 --- a/roles/openshift_logging/files/es_migration.sh +++ 
b/roles/openshift_logging_elasticsearch/files/es_migration.sh diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml new file mode 100644 index 000000000..567c9f289 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ClusterRole +metadata: +  name: rolebinding-reader +rules: +- resources: +    - clusterrolebindings +  verbs: +    - get diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml new file mode 100644 index 000000000..097270772 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Elasticsearch Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml new file mode 100644 index 000000000..1a952b5cf --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -0,0 +1,19 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    es_version: "{{ __latest_es_version }}" +  when: openshift_logging_image_version == 'latest' + +- debug: var=openshift_logging_image_version + +# should we just assume that we will have the correct major version? +- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Elasticsearch +  when: es_version not in __allowed_es_versions diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml new file mode 100644 index 000000000..7e88a7498 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -0,0 +1,278 @@ +--- +- name: Validate Elasticsearch cluster size +  fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this." +  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int + +- name: Validate Elasticsearch Ops cluster size +  fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this." 
+  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int + +- fail: +    msg: Invalid deployment type, one of ['data-master', 'data-client', 'master', 'client'] allowed +  when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types + +- set_fact: +    elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" +    es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" + +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# service account +- name: Create ES service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-elasticsearch" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create ES service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-elasticsearch" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# rolebinding reader +- copy: +    src: rolebinding-reader.yml +    dest: "{{ tempdir }}/rolebinding-reader.yml" + +- name: Create rolebinding-reader role +  oc_obj: +    state: present +    name: "rolebinding-reader" +    kind: clusterrole +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    files: +    - "{{ tempdir }}/rolebinding-reader.yml" +    delete_after: true + +# SA roles +- name: Set rolebinding-reader permissions for ES +  oc_adm_policy_user: +    state: present +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    resource_kind: cluster-role +    resource_name: rolebinding-reader +    user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch" + +# View role and binding +- name: Generate logging-elasticsearch-view-role +  template: +    src: rolebinding.j2 +    dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml" +  vars: +    obj_name: logging-elasticsearch-view-role +    roleRef: +      name: view +    subjects: +    - kind: ServiceAccount +      name: aggregated-logging-elasticsearch +  changed_when: no + +- name: Set logging-elasticsearch-view-role role +  oc_obj: +    state: present +    name: "logging-elasticsearch-view-role" +    kind: rolebinding +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    files: +    - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" +    delete_after: true + +# configmap +- template: +    src: elasticsearch-logging.yml.j2 +    dest: "{{ tempdir }}/elasticsearch-logging.yml" +  when: es_logging_contents is undefined +  changed_when: no + +- template: +    src: elasticsearch.yml.j2 +    dest: "{{ tempdir }}/elasticsearch.yml" +  vars: +    allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower 
| default('false') }}" +    es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" +    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" +  when: es_config_contents is undefined +  changed_when: no + +- copy: +    content: "{{ es_logging_contents }}" +    dest: "{{ tempdir }}/elasticsearch-logging.yml" +  when: es_logging_contents is defined +  changed_when: no + +- copy: +    content: "{{ es_config_contents }}" +    dest: "{{ tempdir }}/elasticsearch.yml" +  when: es_config_contents is defined +  changed_when: no + +- name: Set ES configmap +  oc_configmap: +    state: present +    name: "{{ elasticsearch_name }}" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    from_file: +      elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" +      logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" + + +# secret +- name: Set ES secret +  oc_secret: +    state: present +    name: "logging-elasticsearch" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    files: +    - name: key +      path: "{{ generated_certs_dir }}/logging-es.jks" +    - name: truststore +      path: "{{ generated_certs_dir }}/truststore.jks" +    - name: searchguard.key +      path: "{{ generated_certs_dir }}/elasticsearch.jks" +    - name: searchguard.truststore +      path: "{{ generated_certs_dir }}/truststore.jks" +    - name: admin-key +      path: "{{ generated_certs_dir }}/system.admin.key" +    - name: admin-cert +      path: "{{ generated_certs_dir }}/system.admin.crt" +    - name: admin-ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: admin.jks +      path: "{{ generated_certs_dir }}/system.admin.jks" + +# services +- name: Set logging-{{ es_component }}-cluster service +  oc_service: +    state: present +    name: "logging-{{ es_component }}-cluster" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    selector: +      component: "{{ es_component }}" +      provider: openshift +    # pending #4091 +    #labels: +    #- logging-infra: 'support' +    ports: +    - port: 9300 + +- name: Set logging-{{ es_component }} service +  oc_service: +    state: present +    name: "logging-{{ es_component }}" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    selector: +      component: "{{ es_component }}" +      provider: openshift +    # pending #4091 +    #labels: +    #- logging-infra: 'support' +    ports: +    - port: 9200 +      targetPort: "restapi" + +- name: Creating ES storage template +  template: +    src: pvc.j2 +    dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +  vars: +    obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    size: "{{ openshift_logging_elasticsearch_pvc_size }}" +    access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +    pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +  when: +  - openshift_logging_elasticsearch_storage_type == "pvc" +  - not openshift_logging_elasticsearch_pvc_dynamic + +- name: Creating ES storage template +  template: +    src: pvc.j2 +    dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +  vars: +    obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    size: "{{ openshift_logging_elasticsearch_pvc_size }}" +    access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +    pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +    annotations: +      volume.alpha.kubernetes.io/storage-class: "dynamic" +  when: 
+  - openshift_logging_elasticsearch_storage_type == "pvc" +  - openshift_logging_elasticsearch_pvc_dynamic + +- name: Set ES storage +  oc_obj: +    state: present +    kind: pvc +    name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    files: +    - "{{ tempdir }}/templates/logging-es-pvc.yml" +    delete_after: true +  when: +  - openshift_logging_elasticsearch_storage_type == "pvc" + +- set_fact: +    es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}" +  when: openshift_logging_elasticsearch_deployment_name == "" + +- set_fact: +    es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}" +  when: openshift_logging_elasticsearch_deployment_name != "" + +# DC +- name: Set ES dc templates +  template: +    src: es.j2 +    dest: "{{ tempdir }}/templates/logging-es-dc.yml" +  vars: +    es_cluster_name: "{{ es_component }}" +    component: "{{ es_component }}" +    logging_component: elasticsearch +    deploy_name: "{{ es_deploy_name }}" +    image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}" +    es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" +    es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" +    es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" +    deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" +    replicas: 1 + +- name: Set ES dc +  oc_obj: +    state: present +    name: "{{ es_deploy_name }}" +    namespace: "{{ openshift_logging_elasticsearch_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/logging-es-dc.yml" +    delete_after: true + +## Placeholder for migration when necessary ## + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 index 499e77fb7..377abe21f 100644 --- a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 @@ -1,25 +1,14 @@  # you can override this using by setting a system property, for example -Des.logger.level=DEBUG  es.logger.level: INFO -rootLogger: ${es.logger.level}, {{root_logger}} +rootLogger: ${es.logger.level}, console, file  logger:    # log action execution errors for easier debugging    action: WARN - -  # deprecation logging, turn to DEBUG to see them -  deprecation: WARN, deprecation_log_file -    # reduce the logging for aws, too much is logged under the default INFO    com.amazonaws: WARN -    io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}    io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL} -  # aws will try to do some sketchy JMX stuff, but its not needed. 
-  com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR -  com.amazonaws.metrics.AwsSdkMetrics: ERROR - -  org.apache.http: INFO -    # gateway    #gateway: DEBUG    #index.gateway: DEBUG @@ -39,14 +28,13 @@ logger:  additivity:    index.search.slowlog: false    index.indexing.slowlog: false -  deprecation: false  appender:    console:      type: console      layout:        type: consolePattern -      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n" +      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"    file:      type: dailyRollingFile @@ -56,13 +44,16 @@ appender:        type: pattern        conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" -  deprecation_log_file: -    type: dailyRollingFile -    file: ${path.logs}/${cluster.name}_deprecation.log -    datePattern: "'.'yyyy-MM-dd" -    layout: -      type: pattern -      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" +  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. +  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html +  #file: +    #type: extrasRollingFile +    #file: ${path.logs}/${cluster.name}.log +    #rollingPolicy: timeBased +    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz +    #layout: +      #type: pattern +      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"    index_search_slow_log_file:      type: dailyRollingFile diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 index 355642cb7..681f5a7e6 100644 --- a/roles/openshift_logging/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -14,8 +14,8 @@ index:      flush_threshold_period: 5m  node: -  master: true -  data: true +  master: ${IS_MASTER} +  data: ${HAS_DATA}  network:    host: 0.0.0.0 diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 680c16cf4..e129205ca 100644 --- a/roles/openshift_logging/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -8,7 +8,7 @@ metadata:      deployment: "{{deploy_name}}"      logging-infra: "{{logging_component}}"  spec: -  replicas: {{replicas|default(0)}} +  replicas: {{replicas|default(1)}}    selector:      provider: openshift      component: "{{component}}" @@ -29,7 +29,7 @@ spec:        serviceAccountName: aggregated-logging-elasticsearch        securityContext:          supplementalGroups: -        - {{openshift_logging_es_storage_group}} +        - {{openshift_logging_elasticsearch_storage_group}}  {% if es_node_selector is iterable and es_node_selector | length > 0 %}        nodeSelector:  {% for key, value in es_node_selector.iteritems() %} @@ -73,7 +73,7 @@ spec:                value: "logging-{{es_cluster_name}}"              -                name: "INSTANCE_RAM" -              value: "{{openshift_logging_es_memory_limit}}" +              value: "{{openshift_logging_elasticsearch_memory_limit}}"              -                name: "NODE_QUORUM"                value: "{{es_node_quorum | int}}" @@ -82,7 +82,15 @@ spec:                value: "{{es_recover_expected_nodes}}"              -                name: "RECOVER_AFTER_TIME" -              value: "{{openshift_logging_es_recover_after_time}}" +              value: "{{openshift_logging_elasticsearch_recover_after_time}}" +            - +              name: 
"IS_MASTER" +              value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" + +            - +              name: "HAS_DATA" +              value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" +            volumeMounts:              - name: elasticsearch                mountPath: /etc/elasticsearch/secret @@ -107,4 +115,12 @@ spec:            configMap:              name: logging-elasticsearch          - name: elasticsearch-storage -{% include 'es-storage-'+ es_storage['kind'] + '.partial' %} +{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} +          persistentVolumeClaim: +            claimName: {{ openshift_logging_elasticsearch_pvc_name }} +{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} +          hostPath: +            path: {{ openshift_logging_elasticsearch_hostmount_path }} +{% else %} +          emptydir: {} +{% endif %} diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2 index 07d81afff..f19a3a750 100644 --- a/roles/openshift_logging/templates/pvc.j2 +++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2 @@ -1,7 +1,7 @@  apiVersion: v1  kind: PersistentVolumeClaim  metadata: -  name: "{{obj_name}}" +  name: {{obj_name}}    labels:      logging-infra: support  {% if annotations is defined %} diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 index fcd4e87cc..fcd4e87cc 100644 --- a/roles/openshift_logging/templates/rolebinding.j2 +++ b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml new file mode 100644 index 000000000..7a1f5048b --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -0,0 +1,12 @@ +--- +__latest_es_version: "3_5" +__allowed_es_versions: ["3_5", "3_6"] +__allowed_es_types: ["data-master", "data-client", "master", "client"] + +# TODO: integrate these +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}" +es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}" +es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}" +es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" +es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml new file mode 100644 index 000000000..228196d74 --- /dev/null +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -0,0 +1,59 @@ +--- +### General logging settings +openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_fluentd_namespace: logging + +### Common settings +openshift_logging_fluentd_nodeselector: "{{ 
openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" +openshift_logging_fluentd_cpu_limit: 100m +openshift_logging_fluentd_memory_limit: 512Mi +openshift_logging_fluentd_hosts: ['--all'] + +# float time in seconds to wait between node labelling +openshift_logging_fluentd_label_delay: '0.5' + +# Fluentd deployment type +openshift_logging_fluentd_deployment_type: "hosted" + +### Used by "hosted" and "secure-host" deployments + +# Destination for the application based logs +openshift_logging_fluentd_app_host: "logging-es" +openshift_logging_fluentd_app_port: 9200 +# Destination for the operations based logs +openshift_logging_fluentd_ops_host: "{{ openshift_logging_fluentd_app_host }}" +openshift_logging_fluentd_ops_port: "{{ openshift_logging_fluentd_app_port }}" + +### Used by "hosted" and "secure-aggregator" deployments +#openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}" +openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" +openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" + +openshift_logging_fluentd_app_client_cert: /etc/fluent/keys/cert +openshift_logging_fluentd_app_client_key: /etc/fluent/keys/key +openshift_logging_fluentd_app_ca: /etc/fluent/keys/ca +openshift_logging_fluentd_ops_client_cert: /etc/fluent/keys/cert +openshift_logging_fluentd_ops_client_key: /etc/fluent/keys/key +openshift_logging_fluentd_ops_ca: /etc/fluent/keys/ca + + +# used by "secure-host" and "secure-aggregator" deployments +openshift_logging_fluentd_shared_key: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(128) }}" +openshift_logging_fluentd_aggregating_port: 24284 +openshift_logging_fluentd_aggregating_host: "${HOSTNAME}" +openshift_logging_fluentd_aggregating_secure: "no" +openshift_logging_fluentd_aggregating_strict: "no" +openshift_logging_fluentd_aggregating_cert_path: none +openshift_logging_fluentd_aggregating_key_path: none +openshift_logging_fluentd_aggregating_passphrase: none + +### Deprecating in 3.6 +openshift_logging_fluentd_es_copy: false + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#fluentd_config_contents: +#fluentd_throttle_contents: +#fluentd_secureforward_contents: diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml index 375621ff1..375621ff1 100644 --- a/roles/openshift_logging/files/fluentd-throttle-config.yaml +++ b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf index f4483df79..f4483df79 100644 --- a/roles/openshift_logging/files/secure-forward.conf +++ b/roles/openshift_logging_fluentd/files/secure-forward.conf diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml new file mode 100644 index 000000000..2003aacb2 --- /dev/null +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Fluentd Component +  company: Red Hat, Inc. 
+  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml new file mode 100644 index 000000000..a1ba71b1b --- /dev/null +++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    fluentd_version: "{{ __latest_fluentd_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? +- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Fluentd +  when: fluentd_version not in __allowed_fluentd_versions diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml new file mode 100644 index 000000000..e92a35f27 --- /dev/null +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -0,0 +1,10 @@ +--- +- name: Label {{ node }} for Fluentd deployment +  oc_label: +    name: "{{ node }}" +    kind: node +    state: add +    labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" + +# wait half a second between labels +- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml new file mode 100644 index 000000000..8194223e8 --- /dev/null +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -0,0 +1,206 @@ +--- +- fail: +    msg: Only one Fluentd nodeselector key pair should be provided +  when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" + +- fail: +    msg: Application logs destination is required +  when: not openshift_logging_fluentd_app_host or openshift_logging_fluentd_app_host == '' + +- fail: +    msg: Operations logs destination is required +  when: not openshift_logging_fluentd_ops_host or openshift_logging_fluentd_ops_host == '' + +- fail: +    msg: Invalid deployment type, one of ['hosted', 'secure-aggregator', 'secure-host'] allowed +  when: not openshift_logging_fluentd_deployment_type in __allowed_fluentd_types + +- include: determine_version.yaml + +- set_fact: +    openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}" +  when: +  - openshift_hosted_logging_use_journal is defined +  - openshift_logging_fluentd_use_journal is not defined + +- set_fact: +    openshift_logging_fluentd_use_journal: "{{ __fluentd_use_journal }}" +  when: +  - openshift_hosted_logging_use_journal is not defined +  - openshift_logging_fluentd_use_journal is not defined + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  
changed_when: False + +# we want to make sure we have all the necessary components here + +# create service account +- name: Create Fluentd service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-fluentd" +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Fluentd service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-fluentd" +    namespace: "{{ openshift_logging_fluentd_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# set service account scc +- name: Set privileged permissions for Fluentd +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    resource_kind: scc +    resource_name: privileged +    state: present +    user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd" + +# set service account permissions +- name: Set cluster-reader permissions for Fluentd +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    resource_kind: cluster-role +    resource_name: cluster-reader +    state: present +    user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd" + +# create Fluentd configmap +- template: +    src: fluent.conf.j2 +    dest: "{{ tempdir }}/fluent.conf" +  vars: +    deploy_type: "{{ openshift_logging_fluentd_deployment_type }}" +  when: fluentd_config_contents is undefined +  changed_when: no + +- copy: +    src: fluentd-throttle-config.yaml +    dest: "{{ tempdir }}/fluentd-throttle-config.yaml" +  when: fluentd_throttle_contents is undefined +  changed_when: no + +- copy: +    src: secure-forward.conf +    dest: "{{ tempdir }}/secure-forward.conf" +  when: fluentd_secureforward_contents is undefined + +  changed_when: no + +- copy: +    content: "{{ fluentd_config_contents }}" +    dest: "{{ tempdir }}/fluent.conf" +  when: fluentd_config_contents is defined +  changed_when: no + +- copy: +    content: "{{ fluentd_throttle_contents }}" +    dest: "{{ tempdir }}/fluentd-throttle-config.yaml" +  when: fluentd_throttle_contents is defined +  changed_when: no + +- copy: +    content: "{{ fluentd_secureforward_contents }}" +    dest: "{{ tempdir }}/secure-forward.conf" +  when: fluentd_secureforward_contents is defined +  changed_when: no + +- name: Set Fluentd configmap +  oc_configmap: +    state: present +    name: "logging-fluentd" +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    from_file: +      fluent.conf: "{{ tempdir }}/fluent.conf" +      throttle-config.yaml: "{{ tempdir }}/fluentd-throttle-config.yaml" +      secure-forward.conf: "{{ tempdir }}/secure-forward.conf" + +# create Fluentd secret +# TODO: add aggregation secrets if necessary +- name: Set logging-fluentd secret +  oc_secret: +    state: present +    name: logging-fluentd +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.fluentd.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" + +# create Fluentd daemonset + +# this should change based on the type of fluentd deployment to be done... 
+# TODO: pass in aggregation configurations +- name: Generate logging-fluentd daemonset definition +  template: +    src: fluentd.j2 +    dest: "{{ tempdir }}/templates/logging-fluentd.yaml" +  vars: +    daemonset_name: logging-fluentd +    daemonset_component: fluentd +    daemonset_container_name: fluentd-elasticsearch +    daemonset_serviceAccount: aggregated-logging-fluentd +    app_host: "{{ openshift_logging_fluentd_app_host }}" +    app_port: "{{ openshift_logging_fluentd_app_port }}" +    ops_host: "{{ openshift_logging_fluentd_ops_host }}" +    ops_port: "{{ openshift_logging_fluentd_ops_port }}" +    fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" +    fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" +  check_mode: no +  changed_when: no + +- name: Set logging-fluentd daemonset +  oc_obj: +    state: present +    name: logging-fluentd +    namespace: "{{ openshift_logging_fluentd_namespace }}" +    kind: daemonset +    files: +    - "{{ tempdir }}/templates/logging-fluentd.yaml" +    delete_after: true + +# Scale up Fluentd +- name: Retrieve list of Fluentd hosts +  oc_obj: +    state: list +    kind: node +  when: "'--all' in openshift_logging_fluentd_hosts" +  register: fluentd_hosts + +- name: Set openshift_logging_fluentd_hosts +  set_fact: +    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}" +  when: "'--all' in openshift_logging_fluentd_hosts" + +- include: label_and_wait.yaml +  vars: +    node: "{{ fluentd_host }}" +  with_items: "{{ openshift_logging_fluentd_hosts }}" +  loop_control: +    loop_var: fluentd_host + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 new file mode 100644 index 000000000..46de94d60 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 @@ -0,0 +1,78 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +{% if deploy_type in ['hosted', 'secure-aggregator'] %} +## ordered so that syslog always runs last... 
+@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## +{% else %} +<source> +  @type secure_forward +  @label @INGRESS + +  self_hostname ${HOSTNAME} +  bind 0.0.0.0 +  port {{openshift_logging_fluentd_aggregating_port}} + +  shared_key {{openshift_logging_fluentd_shared_key}} + +  secure {{openshift_logging_fluentd_aggregating_secure}} +  enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} +  ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}} +  ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} +  ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + +  <client> +    host {{openshift_logging_fluentd_aggregating_host}} +  </client> +</source> +{% endif %} + +<label @INGRESS> +{% if deploy_type in ['hosted', 'secure-host'] %} +## filters +  @include configs.d/openshift/filter-pre-*.conf +  @include configs.d/openshift/filter-retag-journal.conf +  @include configs.d/openshift/filter-k8s-meta.conf +  @include configs.d/openshift/filter-kibana-transform.conf +  @include configs.d/openshift/filter-k8s-flatten-hash.conf +  @include configs.d/openshift/filter-k8s-record-transform.conf +  @include configs.d/openshift/filter-syslog-record-transform.conf +  @include configs.d/openshift/filter-viaq-data-model.conf +  @include configs.d/openshift/filter-post-*.conf +## + +## matches +  @include configs.d/openshift/output-pre-*.conf +  @include configs.d/openshift/output-operations.conf +  @include configs.d/openshift/output-applications.conf +  # no post - applications.conf matches everything left +## +{% else %} +  <match **> +    @type secure_forward + +    self_hostname ${HOSTNAME} +    shared_key {{openshift_logging_fluentd_shared_key}} + +    secure {{openshift_logging_fluentd_aggregating_secure}} +    enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} +    ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}} +    ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} +    ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + +    <server> +      host {{openshift_logging_fluentd_aggregating_host}} +      port {{openshift_logging_fluentd_aggregating_port}} +    </server> +  </match> +{% endif %} +</label> diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 new file mode 100644 index 000000000..e185938e3 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -0,0 +1,123 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: +  name: "{{ daemonset_name }}" +  labels: +    provider: openshift +    component: "{{ daemonset_component }}" +    logging-infra: "{{ daemonset_component }}" +spec: +  selector: +    matchLabels: +      provider: openshift +      component: "{{ daemonset_component }}" +  updateStrategy: +    type: RollingUpdate +    rollingUpdate: +      minReadySeconds: 600 +  template: +    metadata: +      name: "{{ daemonset_container_name }}" +      labels: +        logging-infra: "{{ daemonset_component }}" +        provider: openshift +        component: "{{ daemonset_component }}" +    spec: +      serviceAccountName: "{{ daemonset_serviceAccount }}" +      nodeSelector: +        {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" +      containers: +      - 
name: "{{ daemonset_container_name }}" +        image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}" +        imagePullPolicy: Always +        securityContext: +          privileged: true +        resources: +          limits: +            cpu: {{ openshift_logging_fluentd_cpu_limit }} +            memory: {{ openshift_logging_fluentd_memory_limit }} +        volumeMounts: +        - name: runlogjournal +          mountPath: /run/log/journal +        - name: varlog +          mountPath: /var/log +        - name: varlibdockercontainers +          mountPath: /var/lib/docker/containers +          readOnly: true +        - name: config +          mountPath: /etc/fluent/configs.d/user +          readOnly: true +        - name: certs +          mountPath: /etc/fluent/keys +          readOnly: true +        - name: dockerhostname +          mountPath: /etc/docker-hostname +          readOnly: true +        - name: localtime +          mountPath: /etc/localtime +          readOnly: true +        - name: dockercfg +          mountPath: /etc/sysconfig/docker +          readOnly: true +        - name: dockerdaemoncfg +          mountPath: /etc/docker +          readOnly: true +        env: +        - name: "K8S_HOST_URL" +          value: "{{ openshift_logging_fluentd_master_url }}" +        - name: "ES_HOST" +          value: "{{ app_host }}" +        - name: "ES_PORT" +          value: "{{ app_port }}" +        - name: "ES_CLIENT_CERT" +          value: "{{ openshift_logging_fluentd_app_client_cert }}" +        - name: "ES_CLIENT_KEY" +          value: "{{ openshift_logging_fluentd_app_client_key }}" +        - name: "ES_CA" +          value: "{{ openshift_logging_fluentd_app_ca }}" +        - name: "OPS_HOST" +          value: "{{ ops_host }}" +        - name: "OPS_PORT" +          value: "{{ ops_port }}" +        - name: "OPS_CLIENT_CERT" +          value: "{{ openshift_logging_fluentd_ops_client_cert }}" +        - name: "OPS_CLIENT_KEY" +          value: "{{ openshift_logging_fluentd_ops_client_key }}" +        - name: "OPS_CA" +          value: "{{ openshift_logging_fluentd_ops_ca }}" +        - name: "ES_COPY" +          value: "false" +        - name: "USE_JOURNAL" +          value: "{{ openshift_logging_fluentd_use_journal | lower }}" +        - name: "JOURNAL_SOURCE" +          value: "{{ openshift_logging_fluentd_journal_source | default('') }}" +        - name: "JOURNAL_READ_FROM_HEAD" +          value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" +      volumes: +      - name: runlogjournal +        hostPath: +          path: /run/log/journal +      - name: varlog +        hostPath: +          path: /var/log +      - name: varlibdockercontainers +        hostPath: +          path: /var/lib/docker/containers +      - name: config +        configMap: +          name: logging-fluentd +      - name: certs +        secret: +          secretName: logging-fluentd +      - name: dockerhostname +        hostPath: +          path: /etc/hostname +      - name: localtime +        hostPath: +          path: /etc/localtime +      - name: dockercfg +        hostPath: +          path: /etc/sysconfig/docker +      - name: dockerdaemoncfg +        hostPath: +          path: /etc/docker diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml new file mode 100644 index 000000000..f601b738e --- /dev/null +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -0,0 +1,5 @@ +--- 
+__latest_fluentd_version: "3_5" +__allowed_fluentd_versions: ["3_5", "3_6"] +__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"] +__fluentd_use_journal: "{{ (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}" diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml new file mode 100644 index 000000000..23337bcd2 --- /dev/null +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -0,0 +1,41 @@ +--- +### Common settings +openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local" +openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local" +openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_kibana_namespace: logging + +openshift_logging_kibana_nodeselector: "" +openshift_logging_kibana_cpu_limit: null +openshift_logging_kibana_memory_limit: 736Mi + +openshift_logging_kibana_hostname: "kibana.router.default.svc.cluster.local" + +openshift_logging_kibana_es_host: "logging-es" +openshift_logging_kibana_es_port: 9200 + +openshift_logging_kibana_replicas: 1 +openshift_logging_kibana_edge_term_policy: Redirect + +# this is used to determine if this is an operations deployment or a non-ops deployment +# simply used for naming purposes +openshift_logging_kibana_ops_deployment: false + +# Proxy settings +openshift_logging_kibana_proxy_debug: false +openshift_logging_kibana_proxy_cpu_limit: null +openshift_logging_kibana_proxy_memory_limit: 96Mi + +#The absolute path on the control node to the cert file to use +#for the public facing kibana certs +openshift_logging_kibana_cert: "" + +#The absolute path on the control node to the key file to use +#for the public facing kibana certs +openshift_logging_kibana_key: "" + +#The absolute path on the control node to the CA file to use +#for the public facing kibana certs +openshift_logging_kibana_ca: "" diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml new file mode 100644 index 000000000..89e08abc0 --- /dev/null +++ b/roles/openshift_logging_kibana/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Kibana Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml new file mode 100644 index 000000000..53e15af5f --- /dev/null +++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? 
+- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    kibana_version: "{{ __latest_kibana_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? +- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Kibana +  when: kibana_version not in __allowed_kibana_versions diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml new file mode 100644 index 000000000..55b28ee24 --- /dev/null +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -0,0 +1,232 @@ +--- +# fail if we don't have an endpoint for ES to connect to? + +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# create service account +- name: Create Kibana service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-kibana" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Kibana service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-kibana" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +- set_fact: +    kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" +    kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" + +- name: Retrieving the cert to use when generating secrets for the logging components +  slurp: +    src: "{{ generated_certs_dir }}/{{ item.file }}" +  register: key_pairs +  with_items: +  - { name: "ca_file", file: "ca.crt" } +  - { name: "kibana_internal_key", file: "kibana-internal.key"} +  - { name: "kibana_internal_cert", file: "kibana-internal.crt"} +  - { name: "server_tls", file: "server-tls.json"} + +# services +- name: Set {{ kibana_name }} service +  oc_service: +    state: present +    name: "{{ kibana_name }}" +    namespace: "{{ openshift_logging_kibana_namespace }}" +    selector: +      component: "{{ kibana_component }}" +      provider: openshift +    # pending #4091 +    #labels: +    #- logging-infra: 'support' +    ports: +    - port: 443 +      targetPort: "oaproxy" + +# create routes +# TODO: set up these certs differently? 
+- set_fact: +    kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}" +  when: "{{ openshift_logging_kibana_key | trim | length > 0 }}" +  changed_when: false + +- set_fact: +    kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}" +  when: "{{ openshift_logging_kibana_cert | trim | length > 0 }}" +  changed_when: false + +- set_fact: +    kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}" +  when: "{{ openshift_logging_kibana_ca | trim | length > 0 }}" +  changed_when: false + +- set_fact: +    kibana_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}" +  when: kibana_ca is not defined +  changed_when: false + +- name: Generating Kibana route template +  template: +    src: route_reencrypt.j2 +    dest: "{{ tempdir }}/templates/kibana-route.yaml" +  vars: +    obj_name: "{{ kibana_name }}" +    route_host: "{{ openshift_logging_kibana_hostname }}" +    service_name: "{{ kibana_name }}" +    tls_key: "{{ kibana_key | default('') | b64decode }}" +    tls_cert: "{{ kibana_cert | default('') | b64decode }}" +    tls_ca_cert: "{{ kibana_ca | b64decode }}" +    tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}" +    edge_term_policy: "{{ openshift_logging_kibana_edge_term_policy | default('') }}" +    labels: +      component: support +      logging-infra: support +      provider: openshift +  changed_when: no + +# This currently has an issue if the host name changes +- name: Setting Kibana route +  oc_obj: +    state: present +    name: "{{ kibana_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    kind: route +    files: +    - "{{ tempdir }}/templates/kibana-route.yaml" + +# gen session_secret -- if necessary +# TODO: make idempotent +- name: Generate proxy session +  set_fact: +    session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}" +  check_mode: no + +# gen oauth_secret -- if necessary +# TODO: make idempotent +- name: Generate oauth client secret +  set_fact: +    oauth_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(64) }}" +  check_mode: no + +# create oauth client +- name: Create oauth-client template +  template: +    src: oauth-client.j2 +    dest: "{{ tempdir }}/templates/oauth-client.yml" +  vars: +    kibana_hostname: "{{ openshift_logging_kibana_hostname }}" +    secret: "{{ oauth_secret }}" + +- name: Set kibana-proxy oauth-client +  oc_obj: +    state: present +    name: "kibana-proxy" +    namespace: "{{ openshift_logging_namespace }}" +    kind: oauthclient +    files: +    - "{{ tempdir }}/templates/oauth-client.yml" +    delete_after: true + +# create Kibana secret +- name: Set Kibana secret +  oc_secret: +    state: present +    name: "logging-kibana" +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.kibana.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.kibana.crt" + +# create Kibana-proxy secret +- name: Set Kibana Proxy secret +  oc_secret: +    state: present +    name: "logging-kibana-proxy" +    namespace: "{{ openshift_logging_namespace }}" +    # TODO: when possible to have both files and contents for oc_secret use this +    #files: +    #- name: server-key +    #  path: "{{ generated_certs_dir }}/kibana-internal.key" +    #- name: server-cert +    #  path: "{{ 
generated_certs_dir }}/kibana-internal.crt" +    #- name: server-tls +    #  path: "{{ generated_certs_dir }}/server-tls.json" +    contents: +    - path: oauth-secret +      data: "{{ oauth_secret }}" +    - path: session-secret +      data: "{{ session_secret }}" +    - path: server-key +      data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}" +    - path: server-cert +      data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}" +    - path: server-tls +      data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}" + +# create Kibana DC +- name: Generate Kibana DC template +  template: +    src: kibana.j2 +    dest: "{{ tempdir }}/templates/kibana-dc.yaml" +  vars: +    component: "{{ kibana_component }}" +    logging_component: kibana +    deploy_name: "{{ kibana_name }}" +    image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}" +    proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}" +    es_host: "{{ openshift_logging_kibana_es_host }}" +    es_port: "{{ openshift_logging_kibana_es_port }}" +    kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" +    kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}" +    kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}" +    kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" +    replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" +    kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" + +- name: Set Kibana DC +  oc_obj: +    state: present +    name: "{{ kibana_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/kibana-dc.yaml" +    delete_after: true + +# update master configs? 
+ +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 25fab9ac4..f8043812b 100644 --- a/roles/openshift_logging/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -1,17 +1,17 @@  apiVersion: "v1"  kind: "DeploymentConfig"  metadata: -  name: "{{deploy_name}}" +  name: "{{ deploy_name }}"    labels:      provider: openshift -    component: "{{component}}" -    logging-infra: "{{logging_component}}" +    component: "{{ component }}" +    logging-infra: "{{ logging_component }}"  spec: -  replicas: {{replicas|default(0)}} +  replicas: {{ replicas | default(1) }}    selector:      provider: openshift -    component: "{{component}}" -    logging-infra: "{{logging_component}}" +    component: "{{ component }}" +    logging-infra: "{{ logging_component }}"    strategy:      rollingParams:        intervalSeconds: 1 @@ -20,37 +20,39 @@ spec:      type: Rolling    template:      metadata: -      name: "{{deploy_name}}" +      name: "{{ deploy_name }}"        labels: -        logging-infra: "{{logging_component}}" +        logging-infra: "{{ logging_component }}"          provider: openshift -        component: "{{component}}" +        component: "{{ component }}"      spec:        serviceAccountName: aggregated-logging-kibana  {% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}        nodeSelector:  {% for key, value in kibana_node_selector.iteritems() %} -        {{key}}: "{{value}}" +        {{ key }}: "{{ value }}"  {% endfor %}  {% endif %}        containers:          -            name: "kibana" -          image: {{image}} +          image: {{ image }}            imagePullPolicy: Always -{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %} +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}            resources:              limits: -{% if kibana_cpu_limit is not none %} -              cpu: "{{kibana_cpu_limit}}" +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} +              cpu: "{{ kibana_cpu_limit }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} +              memory: "{{ kibana_memory_limit }}"  {% endif %} -              memory: "{{kibana_memory_limit | default('736Mi') }}"  {% endif %}            env:              - name: "ES_HOST" -              value: "{{es_host}}" +              value: "{{ es_host }}"              - name: "ES_PORT" -              value: "{{es_port}}" +              value: "{{ es_port }}"              -                name: "KIBANA_MEMORY_LIMIT"                valueFrom: @@ -61,17 +63,26 @@ spec:              - name: kibana                mountPath: /etc/kibana/keys                readOnly: true +          readinessProbe: +            exec: +              command: +              - "/usr/share/kibana/probe/readiness.sh" +            initialDelaySeconds: 5 +            timeoutSeconds: 4 +            periodSeconds: 5          -            name: "kibana-proxy" -          image: {{proxy_image}} +          image: {{ proxy_image }}            imagePullPolicy: Always -{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or 
(kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %} +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}            resources:              limits: -{% if kibana_proxy_cpu_limit is not none %} -              cpu: "{{kibana_proxy_cpu_limit}}" +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} +              cpu: "{{ kibana_proxy_cpu_limit }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} +              memory: "{{ kibana_proxy_memory_limit }}"  {% endif %} -              memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"  {% endif %}            ports:              - @@ -92,19 +103,19 @@ spec:               value: kibana-proxy              -               name: "OAP_MASTER_URL" -             value: {{openshift_logging_master_url}} +             value: {{ openshift_logging_kibana_master_url }}              -               name: "OAP_PUBLIC_MASTER_URL" -             value: {{openshift_logging_master_public_url}} +             value: {{ openshift_logging_kibana_master_public_url }}              -               name: "OAP_LOGOUT_REDIRECT" -             value: {{openshift_logging_master_public_url}}/console/logout +             value: {{ openshift_logging_kibana_master_public_url }}/console/logout              -               name: "OAP_MASTER_CA_FILE"               value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"              -               name: "OAP_DEBUG" -             value: "{{openshift_logging_kibana_proxy_debug}}" +             value: "{{ openshift_logging_kibana_proxy_debug }}"              -               name: "OAP_OAUTH_SECRET_FILE"               value: "/secret/oauth-secret" diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2 index 41d3123cb..6767f6d89 100644 --- a/roles/openshift_logging/templates/oauth-client.j2 +++ b/roles/openshift_logging_kibana/templates/oauth-client.j2 @@ -6,8 +6,7 @@ metadata:      logging-infra: support  secret: {{secret}}  redirectURIs: -- https://{{openshift_logging_kibana_hostname}} -- https://{{openshift_logging_kibana_ops_hostname}} +- https://{{kibana_hostname}}  scopeRestrictions:  - literals:    - user:info diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 index cf8a9e65f..cf8a9e65f 100644 --- a/roles/openshift_logging/templates/route_reencrypt.j2 +++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml new file mode 100644 index 000000000..87b281c4b --- /dev/null +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -0,0 +1,3 @@ +--- +__latest_kibana_version: "3_5" +__allowed_kibana_versions: ["3_5", "3_6"] diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml new file mode 100644 index 000000000..8aaa28706 --- /dev/null +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -0,0 +1,43 @@ +--- +### General logging settings +openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | 
default('latest') }}" +openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_mux_namespace: logging + +### Common settings +openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" +openshift_logging_mux_cpu_limit: 100m +openshift_logging_mux_memory_limit: 512Mi + +openshift_logging_mux_replicas: 1 + +# Destination for the application based logs +openshift_logging_mux_app_host: "logging-es" +openshift_logging_mux_app_port: 9200 +# Destination for the operations based logs +openshift_logging_mux_ops_host: "{{ openshift_logging_mux_app_host }}" +openshift_logging_mux_ops_port: "{{ openshift_logging_mux_app_port }}" + +### Used by "hosted" and "secure-aggregator" deployments +openshift_logging_mux_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}" +openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" +openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" + +openshift_logging_mux_allow_external: false +openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_port: 24284 + +openshift_logging_mux_app_client_cert: /etc/fluent/keys/cert +openshift_logging_mux_app_client_key: /etc/fluent/keys/key +openshift_logging_mux_app_ca: /etc/fluent/keys/ca +openshift_logging_mux_ops_client_cert: /etc/fluent/keys/cert +openshift_logging_mux_ops_client_key: /etc/fluent/keys/key +openshift_logging_mux_ops_ca: /etc/fluent/keys/ca + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#mux_config_contents: +#mux_throttle_contents: +#mux_secureforward_contents: diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf index aeaa705ee..aeaa705ee 100644 --- a/roles/openshift_logging/files/fluent.conf +++ b/roles/openshift_logging_mux/files/fluent.conf diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf new file mode 100644 index 000000000..f4483df79 --- /dev/null +++ b/roles/openshift_logging_mux/files/secure-forward.conf @@ -0,0 +1,24 @@ +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key <SECRET_STRING> + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key +  # for private CA secret key +# ca_private_key_passphrase passphrase + +# <server> +  # or IP +#   host server.fqdn.example.com +#   port 24284 +# </server> +# <server> +  # ip address to connect +#   host 203.0.113.8 +  # specify hostlabel for FQDN verification if ipaddress is used for host +#   hostlabel server.fqdn.example.com +# </server> diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml new file mode 100644 index 000000000..f40beb79d --- /dev/null +++ b/roles/openshift_logging_mux/meta/main.yaml @@ 
-0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Mux Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_mux/tasks/determine_version.yaml b/roles/openshift_logging_mux/tasks/determine_version.yaml new file mode 100644 index 000000000..229bcf3d5 --- /dev/null +++ b/roles/openshift_logging_mux/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    mux_version: "{{ __latest_mux_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? +- set_fact: mux_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for mux +  when: mux_version not in __allowed_mux_versions diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml new file mode 100644 index 000000000..432cab9e9 --- /dev/null +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -0,0 +1,197 @@ +--- +- fail: +    msg: Application logs destination is required +  when: not openshift_logging_mux_app_host or openshift_logging_mux_app_host == '' + +- fail: +    msg: Operations logs destination is required +  when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' + +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# create service account +- name: Create Mux service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-mux" +    namespace: "{{ openshift_logging_mux_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Mux service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-mux" +    namespace: "{{ openshift_logging_mux_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# set service account scc +- name: Set privileged permissions for Mux +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_mux_namespace }}" +    resource_kind: scc +    resource_name: privileged +    state: present +    user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux" + +# set service account permissions +- name: Set cluster-reader permissions for Mux +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_mux_namespace }}" +    resource_kind: cluster-role +    resource_name: cluster-reader +    state: present +    user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux" + +# set 
hostmount-anyuid permissions +- name: Set hostmount-anyuid permissions for Mux +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_mux_namespace }}" +    resource_kind: scc +    resource_name: hostmount-anyuid +    state: present +    user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux" + +# create Mux configmap +- copy: +    src: fluent.conf +    dest: "{{mktemp.stdout}}/fluent-mux.conf" +  when: fluentd_mux_config_contents is undefined +  changed_when: no + +- copy: +    src: secure-forward.conf +    dest: "{{mktemp.stdout}}/secure-forward-mux.conf" +  when: fluentd_mux_secureforward_contents is undefined +  changed_when: no + +- copy: +    content: "{{fluentd_mux_config_contents}}" +    dest: "{{mktemp.stdout}}/fluent-mux.conf" +  when: fluentd_mux_config_contents is defined +  changed_when: no + +- copy: +    content: "{{fluentd_mux_secureforward_contents}}" +    dest: "{{mktemp.stdout}}/secure-forward-mux.conf" +  when: fluentd_mux_secureforward_contents is defined +  changed_when: no + +- name: Set Mux configmap +  oc_configmap: +    state: present +    name: "logging-mux" +    namespace: "{{ openshift_logging_mux_namespace }}" +    from_file: +      fluent.conf: "{{ tempdir }}/fluent-mux.conf" +      secure-forward.conf: "{{ tempdir }}/secure-forward-mux.conf" + +# create Mux secret +- name: Set logging-mux secret +  oc_secret: +    state: present +    name: logging-mux +    namespace: "{{ openshift_logging_mux_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.mux.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.mux.crt" +    - name: shared_key +      path: "{{ generated_certs_dir }}/mux_shared_key" + +# services +- name: Set logging-mux service for external communication +  oc_service: +    state: present +    name: "logging-mux" +    namespace: "{{ openshift_logging_mux_namespace }}" +    selector: +      component: mux +      provider: openshift +    # pending #4091 +    #labels: +    #- logging-infra: 'support' +    ports: +    - name: mux-forward +      port: "{{ openshift_logging_mux_port }}" +      targetPort: "mux-forward" +  # pending #4091 +  #  externalIPs: +  #  - "{{ ansible_eth0.ipv4.address }}" +  when: openshift_logging_mux_allow_external | bool + +- name: Set logging-mux service for internal communication +  oc_service: +    state: present +    name: "logging-mux" +    namespace: "{{ openshift_logging_mux_namespace }}" +    selector: +      component: mux +      provider: openshift +    # pending #4091 +    #labels: +    #- logging-infra: 'support' +    ports: +    - name: mux-forward +      port: "{{ openshift_logging_mux_port }}" +      targetPort: "mux-forward" +  when: not openshift_logging_mux_allow_external | bool + +# create Mux DC +- name: Generating mux deploymentconfig +  template: +    src: mux.j2 +    dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml" +  vars: +    component: mux +    logging_component: mux +    deploy_name: "logging-{{ component }}" +    image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}" +    es_host: "{{ openshift_logging_mux_app_host }}" +    es_port: "{{ openshift_logging_mux_app_port }}" +    ops_host: "{{ openshift_logging_mux_ops_host }}" +    ops_port: "{{ openshift_logging_mux_ops_port }}" +    mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}" +    mux_memory_limit: "{{ 
openshift_logging_mux_memory_limit }}" +    replicas: "{{ openshift_logging_mux_replicas | default(1) }}" +    mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}" +  check_mode: no +  changed_when: no + +- name: Set logging-mux DC +  oc_obj: +    state: present +    name: logging-mux +    namespace: "{{ openshift_logging_mux_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/logging-mux-dc.yaml" +    delete_after: true + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index 41e6abd52..770a2bfbd 100644 --- a/roles/openshift_logging/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -7,7 +7,7 @@ metadata:      component: "{{component}}"      logging-infra: "{{logging_component}}"  spec: -  replicas: {{replicas|default(0)}} +  replicas: {{replicas|default(1)}}    selector:      provider: openshift      component: "{{component}}" @@ -26,7 +26,7 @@ spec:          provider: openshift          component: "{{component}}"      spec: -      serviceAccountName: aggregated-logging-fluentd +      serviceAccountName: aggregated-logging-mux  {% if mux_node_selector is iterable and mux_node_selector | length > 0 %}        nodeSelector:  {% for key, value in mux_node_selector.iteritems() %} @@ -68,33 +68,33 @@ spec:            readOnly: true          env:          - name: "K8S_HOST_URL" -          value: "{{openshift_logging_master_url}}" +          value: "{{openshift_logging_mux_master_url}}"          - name: "ES_HOST" -          value: "{{openshift_logging_es_host}}" +          value: "{{openshift_logging_mux_app_host}}"          - name: "ES_PORT" -          value: "{{openshift_logging_es_port}}" +          value: "{{openshift_logging_mux_app_port}}"          - name: "ES_CLIENT_CERT" -          value: "{{openshift_logging_es_client_cert}}" +          value: "{{openshift_logging_mux_app_client_cert}}"          - name: "ES_CLIENT_KEY" -          value: "{{openshift_logging_es_client_key}}" +          value: "{{openshift_logging_mux_app_client_key}}"          - name: "ES_CA" -          value: "{{openshift_logging_es_ca}}" +          value: "{{openshift_logging_mux_app_ca}}"          - name: "OPS_HOST" -          value: "{{ops_host}}" +          value: "{{openshift_logging_mux_ops_host}}"          - name: "OPS_PORT" -          value: "{{ops_port}}" +          value: "{{openshift_logging_mux_ops_port}}"          - name: "OPS_CLIENT_CERT" -          value: "{{openshift_logging_es_ops_client_cert}}" +          value: "{{openshift_logging_mux_ops_client_cert}}"          - name: "OPS_CLIENT_KEY" -          value: "{{openshift_logging_es_ops_client_key}}" +          value: "{{openshift_logging_mux_ops_client_key}}"          - name: "OPS_CA" -          value: "{{openshift_logging_es_ops_ca}}" +          value: "{{openshift_logging_mux_ops_ca}}"          - name: "USE_JOURNAL"            value: "false"          - name: "JOURNAL_SOURCE" -          value: "{{openshift_logging_fluentd_journal_source | default('')}}" +          value: "{{openshift_logging_mux_journal_source | default('')}}"          - name: "JOURNAL_READ_FROM_HEAD" -          value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" +          value: "{{openshift_logging_mux_journal_read_from_head|lower}}"          - name: FORWARD_LISTEN_HOST            value: "{{ openshift_logging_mux_hostname }}"          - name: 
FORWARD_LISTEN_PORT @@ -102,14 +102,14 @@ spec:          - name: USE_MUX            value: "true"          - name: MUX_ALLOW_EXTERNAL -          value: "{{ openshift_logging_mux_allow_external| default('false') }}" +          value: "{{ openshift_logging_mux_allow_external | default('false') }}"        volumes:        - name: config          configMap:            name: logging-mux        - name: certs          secret: -          secretName: logging-fluentd +          secretName: logging-mux        - name: dockerhostname          hostPath:            path: /etc/hostname diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml new file mode 100644 index 000000000..4234b74e2 --- /dev/null +++ b/roles/openshift_logging_mux/vars/main.yml @@ -0,0 +1,3 @@ +--- +__latest_mux_version: "3_5" +__allowed_mux_versions: ["3_5", "3_6"] diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py index 88801e487..a86c96df7 100644 --- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py +++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py @@ -8,7 +8,7 @@ def map_from_pairs(source, delim="="):      if source == '':          return dict() -    return dict(source.split(delim) for item in source.split(",")) +    return dict(item.split(delim) for item in source.split(","))  # pylint: disable=too-few-public-methods | 
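
The map_from_pairs() change, applied identically to the openshift_logging and openshift_storage_glusterfs filter plugins, makes the generator split each comma-separated item on the delimiter instead of re-splitting the whole source string, which only worked when the input held a single key=value pair. A minimal standalone sketch of the corrected filter in plain Python; the "region=infra,zone=default" input is an illustrative node-selector label string, not a value taken from the playbooks:

    def map_from_pairs(source, delim="="):
        # corrected form: split on "," first, then split each item on the delimiter
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(","))

    # e.g. an openshift_logging_mux_nodeselector-style label string (illustrative value)
    print(map_from_pairs("region=infra,zone=default"))
    # {'region': 'infra', 'zone': 'default'}

With the old generator, source.split(delim) was evaluated for every item, so a two-pair string produced a 3-element sequence and dict() raised a ValueError.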

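The determine_version.yaml tasks added for the mux role reduce openshift_logging_image_version to a "3_<minor>" identifier before checking it against __allowed_mux_versions, using Jinja's regex_replace filter (which follows Python re.sub semantics). A small sketch of that mapping in plain Python, assuming illustrative image tags such as "v3.6.173" that are not taken from the changeset:

    import re

    # same pattern and replacement as the regex_replace call in determine_version.yaml
    VERSION_RE = r'^v?(?P<major>\d)\.(?P<minor>\d).*$'

    def mux_version(image_version, latest="3_5"):
        # 'latest' maps to __latest_mux_version; anything else becomes "3_<minor>"
        if image_version == "latest":
            return latest
        return re.sub(VERSION_RE, r'3_\g<minor>', image_version)

    print(mux_version("v3.6.173"))   # 3_6
    print(mux_version("3.5.5.31"))   # 3_5
    print(mux_version("latest"))     # 3_5 (== __latest_mux_version)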