Diffstat (limited to 'roles')
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py  |  29
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py   | 333
-rw-r--r--  roles/openshift_logging/meta/main.yaml                       |   2
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml          |  20
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml     |   4
-rw-r--r--  roles/openshift_logging/tasks/scale.yaml                     |   4
-rw-r--r--  roles/openshift_logging/vars/main.yaml                       |   4
7 files changed, 220 insertions, 176 deletions
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index b42d5da5f..007be3ac0 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -1,28 +1,37 @@
-import random, string
-import shutil
-import sys
-import StringIO
+'''
+ Openshift Logging class that provides useful filters used in Logging
+'''
 
-def random_word(source_alpha,length):
+import random
+
+
+def random_word(source_alpha, length):
+    ''' Returns a random word given the source of characters to pick from and resulting length '''
     return ''.join(random.choice(source_alpha) for i in range(length))
 
+
 def entry_from_named_pair(register_pairs, key):
-    from ansible.utils.display import Display
+    ''' Returns the entry in key given results provided by register_pairs '''
     results = register_pairs.get("results")
-    if results == None:
-        raise RuntimeError("The dict argument does not have a 'results' entry.  Must not have been created using 'register' in a loop")
+    if results is None:
+        raise RuntimeError("The dict argument does not have a 'results' entry. "
+                           "Must not have been created using 'register' in a loop")
     for result in results:
         item = result.get("item")
-        if item != None:
-            name = item.get("name")
+        if item is not None:
+            name = item.get("name")
             if name == key:
                 return result["content"]
     raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
 
+
+# pylint: disable=too-few-public-methods
 class FilterModule(object):
     ''' OpenShift Logging Filters '''
 
+    # pylint: disable=no-self-use, too-few-public-methods
    def filters(self):
+        ''' Returns the names of the filters provided by this class '''
         return {
             'random_word': random_word,
             'entry_from_named_pair': entry_from_named_pair,
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 1f0c25a84..8bbfdf7bf 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -1,5 +1,4 @@
-
-DOCUMENTATION = """
+'''
 ---
 module: openshift_logging_facts
 version_added: ""
@@ -8,7 +7,19 @@ description:
   - Determine the current facts about the OpenShift logging stack (e.g. cluster size)
 options:
 author: Red Hat, Inc
-"""
+'''
+
+import copy
+import json
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from subprocess import *   # noqa: F402,F403
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import *   # noqa: F402,F403
+
+import yaml
 
 EXAMPLES = """
 - action: opneshift_logging_facts
@@ -17,238 +28,258 @@
 RETURN = """
 """
 
-import copy
-import json
-import exceptions
-import yaml
-from subprocess import *
+DEFAULT_OC_OPTIONS = ["-o", "json"]
 
-default_oc_options = ["-o","json"]
+# constants used for various labels and selectors
+COMPONENT_KEY = "component"
+LOGGING_INFRA_KEY = "logging-infra"
 
-#constants used for various labels and selectors
-COMPONENT_KEY="component"
-LOGGING_INFRA_KEY="logging-infra"
+# selectors for filtering resources
+DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
+LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
+ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
 
-#selectors for filtering resources
-DS_FLUENTD_SELECTOR=LOGGING_INFRA_KEY + "=" + "fluentd"
-LOGGING_SELECTOR=LOGGING_INFRA_KEY + "=" + "support"
-ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
-COMPONENTS = ["kibana","curator","elasticsearch","fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
 
 class OCBaseCommand(object):
+    ''' The base class used to query openshift '''
+
     def __init__(self, binary, kubeconfig, namespace):
+        ''' the init method of OCBaseCommand class '''
         self.binary = binary
         self.kubeconfig = kubeconfig
-        self.user = self.getSystemAdmin(self.kubeconfig)
+        self.user = self.get_system_admin(self.kubeconfig)
         self.namespace = namespace
 
-    def getSystemAdmin(self,kubeconfig):
-        with open(kubeconfig,'r') as f:
-            config = yaml.load(f)
+    # pylint: disable=no-self-use
+    def get_system_admin(self, kubeconfig):
+        ''' Retrieves the system admin '''
+        with open(kubeconfig, 'r') as kubeconfig_file:
+            config = yaml.load(kubeconfig_file)
             for user in config["users"]:
                 if user["name"].startswith("system:admin"):
                     return user["name"]
         raise Exception("Unable to find system:admin in: " + kubeconfig)
 
-    def oc(self, sub, kind, namespace=None, name=None,addOptions=[]):
+    # pylint: disable=too-many-arguments, dangerous-default-value
+    def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
+        ''' Wrapper method for the "oc" command '''
         cmd = [self.binary, sub, kind]
-        if name != None:
+        if name is not None:
             cmd = cmd + [name]
-        if namespace != None:
+        if namespace is not None:
             cmd = cmd + ["-n", namespace]
-        cmd = cmd + ["--user="+self.user,"--config="+self.kubeconfig] + default_oc_options + addOptions
+        if add_options is None:
+            add_options = []
+        cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
         try:
-            process = Popen(cmd, stdout=PIPE, stderr=PIPE)
+            process = Popen(cmd, stdout=PIPE, stderr=PIPE)   # noqa: F405
             out, err = process.communicate(cmd)
             if len(err) > 0:
                 if 'not found' in err:
-                    return {'items':[]}
+                    return {'items': []}
                 if 'No resources found' in err:
-                    return {'items':[]}
+                    return {'items': []}
                 raise Exception(err)
-        except Exception as e:
-            err = "There was an exception trying to run the command '"+ " ".join(cmd) +"' " + str(e)
+        except Exception as excp:
+            err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
            raise Exception(err)
         return json.loads(out)
 
-class OpenshiftLoggingFacts(OCBaseCommand):
 
+class OpenshiftLoggingFacts(OCBaseCommand):
+    ''' The class structure for holding the OpenshiftLogging Facts'''
     name = "facts"
 
     def __init__(self, logger, binary, kubeconfig, namespace):
+        ''' The init method for OpenshiftLoggingFacts '''
         super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
         self.logger = logger
         self.facts = dict()
 
-    def defaultKeysFor(self, kind):
+    def default_keys_for(self, kind):
+        ''' Sets the default key values for kind '''
         for comp in COMPONENTS:
-            self.addFactsFor(comp, kind)
+            self.add_facts_for(comp, kind)
 
-    def addFactsFor(self, comp, kind, name=None, facts=None):
-        if self.facts.has_key(comp) == False:
+    def add_facts_for(self, comp, kind, name=None, facts=None):
+        ''' Add facts for the provided kind '''
+        if comp in self.facts is False:
             self.facts[comp] = dict()
-        if self.facts[comp].has_key(kind) == False:
+        if kind in self.facts[comp] is False:
             self.facts[comp][kind] = dict()
         if name:
-           self.facts[comp][kind][name] = facts
+            self.facts[comp][kind][name] = facts
 
-    def factsForRoutes(self, namespace):
-        self.defaultKeysFor("routes")
-        routeList =  self.oc("get","routes", namespace=namespace, addOptions=["-l",ROUTE_SELECTOR])
-        if len(routeList["items"]) == 0:
+    def facts_for_routes(self, namespace):
+        ''' Gathers facts for Routes in logging namespace '''
+        self.default_keys_for("routes")
+        route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
+        if len(route_list["items"]) == 0:
            return None
-        for route in routeList["items"]:
+        for route in route_list["items"]:
             name = route["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None:
-                self.addFactsFor(comp, "routes", name, dict(host=route["spec"]["host"]))
+            if comp is not None:
+                self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
         self.facts["agl_namespace"] = namespace
-
-    def factsForDaemonsets(self, namespace):
-        self.defaultKeysFor("daemonsets")
-        dsList = self.oc("get", "daemonsets", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY+"=fluentd"])
-        if len(dsList["items"]) == 0:
+
+    def facts_for_daemonsets(self, namespace):
+        ''' Gathers facts for Daemonsets in logging namespace '''
+        self.default_keys_for("daemonsets")
+        ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
+                                  add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
+        if len(ds_list["items"]) == 0:
             return
-        for ds in dsList["items"]:
-            name = ds["metadata"]["name"]
+        for ds_item in ds_list["items"]:
+            name = ds_item["metadata"]["name"]
             comp = self.comp(name)
-            spec = ds["spec"]["template"]["spec"]
+            spec = ds_item["spec"]["template"]["spec"]
             container = spec["containers"][0]
             result = dict(
-                selector = ds["spec"]["selector"],
-                image = container["image"],
-                resources = container["resources"],
-                nodeSelector = spec["nodeSelector"],
-                serviceAccount = spec["serviceAccount"],
-                terminationGracePeriodSeconds = spec["terminationGracePeriodSeconds"]
+                selector=ds_item["spec"]["selector"],
+                image=container["image"],
+                resources=container["resources"],
+                nodeSelector=spec["nodeSelector"],
+                serviceAccount=spec["serviceAccount"],
+                terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
             )
-            self.addFactsFor(comp, "daemonsets", name, result)
-
-    def factsForPvcs(self, namespace):
-        self.defaultKeysFor("pvcs")
-        pvclist = self.oc("get", "pvc", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY])
+            self.add_facts_for(comp, "daemonsets", name, result)
+
+    def facts_for_pvcs(self, namespace):
+        ''' Gathers facts for PVCS in logging namespace'''
+        self.default_keys_for("pvcs")
+        pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
         if len(pvclist["items"]) == 0:
             return
-        pvcs = []
         for pvc in pvclist["items"]:
             name = pvc["metadata"]["name"]
             comp = self.comp(name)
-            self.addFactsFor(comp,"pvcs",name,dict())
-
-    def factsForDeploymentConfigs(self, namespace):
-        self.defaultKeysFor("deploymentconfigs")
-        dclist = self.oc("get", "deploymentconfigs", namespace=namespace, addOptions=["-l",LOGGING_INFRA_KEY])
+            self.add_facts_for(comp, "pvcs", name, dict())
+
+    def facts_for_deploymentconfigs(self, namespace):
+        ''' Gathers facts for DeploymentConfigs in logging namespace '''
+        self.default_keys_for("deploymentconfigs")
+        dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
         if len(dclist["items"]) == 0:
             return
         dcs = dclist["items"]
-        for dc in dcs:
-            name = dc["metadata"]["name"]
+        for dc_item in dcs:
+            name = dc_item["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None:
-                spec = dc["spec"]["template"]["spec"]
+            if comp is not None:
+                spec = dc_item["spec"]["template"]["spec"]
                 facts = dict(
-                    selector = dc["spec"]["selector"],
-                    replicas = dc["spec"]["replicas"],
-                    serviceAccount = spec["serviceAccount"],
-                    containers = dict(),
-                    volumes = dict()
+                    selector=dc_item["spec"]["selector"],
+                    replicas=dc_item["spec"]["replicas"],
+                    serviceAccount=spec["serviceAccount"],
+                    containers=dict(),
+                    volumes=dict()
                 )
-                if spec.has_key("volumes"):
+                if "volumes" in spec:
                     for vol in spec["volumes"]:
                         clone = copy.deepcopy(vol)
                         clone.pop("name", None)
                         facts["volumes"][vol["name"]] = clone
                 for container in spec["containers"]:
                     facts["containers"][container["name"]] = dict(
-                        image = container["image"],
-                        resources = container["resources"],
+                        image=container["image"],
+                        resources=container["resources"],
                     )
-                self.addFactsFor(comp,"deploymentconfigs",name,facts)
+                self.add_facts_for(comp, "deploymentconfigs", name, facts)
 
-    def factsForServices(self, namespace):
-        self.defaultKeysFor("services")
-        servicelist = self.oc("get", "services", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
+    def facts_for_services(self, namespace):
+        ''' Gathers facts for services in logging namespace '''
+        self.default_keys_for("services")
+        servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
         if len(servicelist["items"]) == 0:
             return
         for service in servicelist["items"]:
             name = service["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None:
-                self.addFactsFor(comp, "services", name, dict())
-
-    def factsForConfigMaps(self, namespace):
-        self.defaultKeysFor("configmaps")
-        aList = self.oc("get", "configmaps", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
-        if len(aList["items"]) == 0:
+            if comp is not None:
+                self.add_facts_for(comp, "services", name, dict())
+
+    def facts_for_configmaps(self, namespace):
+        ''' Gathers facts for configmaps in logging namespace '''
+        self.default_keys_for("configmaps")
+        a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+        if len(a_list["items"]) == 0:
             return
-        for item in aList["items"]:
+        for item in a_list["items"]:
             name = item["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None:
-                self.addFactsFor(comp, "configmaps", name, item["data"])
-
-    def factsForOAuthClients(self, namespace):
-        self.defaultKeysFor("oauthclients")
-        aList = self.oc("get", "oauthclients", namespace=namespace, addOptions=["-l",LOGGING_SELECTOR])
-        if len(aList["items"]) == 0:
+            if comp is not None:
+                self.add_facts_for(comp, "configmaps", name, item["data"])
+
+    def facts_for_oauthclients(self, namespace):
+        ''' Gathers facts for oauthclients used with logging '''
+        self.default_keys_for("oauthclients")
+        a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+        if len(a_list["items"]) == 0:
             return
-        for item in aList["items"]:
+        for item in a_list["items"]:
             name = item["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None:
+            if comp is not None:
                 result = dict(
-                    redirectURIs = item["redirectURIs"]
+                    redirectURIs=item["redirectURIs"]
                 )
-                self.addFactsFor(comp, "oauthclients", name, result)
+                self.add_facts_for(comp, "oauthclients", name, result)
 
-    def factsForSecrets(self, namespace):
-        self.defaultKeysFor("secrets")
-        aList = self.oc("get", "secrets", namespace=namespace)
-        if len(aList["items"]) == 0:
+    def facts_for_secrets(self, namespace):
+        ''' Gathers facts for secrets in the logging namespace '''
+        self.default_keys_for("secrets")
+        a_list = self.oc_command("get", "secrets", namespace=namespace)
+        if len(a_list["items"]) == 0:
             return
-        for item in aList["items"]:
+        for item in a_list["items"]:
             name = item["metadata"]["name"]
             comp = self.comp(name)
-            if comp != None and item["type"] == "Opaque":
+            if comp is not None and item["type"] == "Opaque":
                 result = dict(
-                    keys = item["data"].keys()
+                    keys=item["data"].keys()
                 )
-                self.addFactsFor(comp, "secrets", name, result)
+                self.add_facts_for(comp, "secrets", name, result)
 
-    def factsForSCCs(self, namespace):
-        self.defaultKeysFor("sccs")
-        scc = self.oc("get", "scc", name="privileged")
+    def facts_for_sccs(self):
+        ''' Gathers facts for SCCs used with logging '''
+        self.default_keys_for("sccs")
+        scc = self.oc_command("get", "scc", name="privileged")
         if len(scc["users"]) == 0:
             return
         for item in scc["users"]:
             comp = self.comp(item)
-            if comp != None:
-                self.addFactsFor(comp, "sccs", "privileged", dict())
+            if comp is not None:
+                self.add_facts_for(comp, "sccs", "privileged", dict())
 
-    def factsForClusterRoleBindings(self, namespace):
-        self.defaultKeysFor("clusterrolebindings")
-        role = self.oc("get", "clusterrolebindings", name="cluster-readers")
-        if "subjects" not in role or  len(role["subjects"]) == 0:
+    def facts_for_clusterrolebindings(self, namespace):
+        ''' Gathers ClusterRoleBindings used with logging '''
+        self.default_keys_for("clusterrolebindings")
+        role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
+        if "subjects" not in role or len(role["subjects"]) == 0:
            return
         for item in role["subjects"]:
             comp = self.comp(item["name"])
-            if comp != None and namespace == item["namespace"]:
-                self.addFactsFor(comp, "clusterrolebindings", "cluster-readers", dict())
+            if comp is not None and namespace == item["namespace"]:
+                self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
 
 # this needs to end up nested under the service account...
-    def factsForRoleBindings(self, namespace):
-        self.defaultKeysFor("rolebindings")
-        role = self.oc("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
+    def facts_for_rolebindings(self, namespace):
+        ''' Gathers facts for RoleBindings used with logging '''
+        self.default_keys_for("rolebindings")
+        role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
         if "subjects" not in role or len(role["subjects"]) == 0:
             return
         for item in role["subjects"]:
             comp = self.comp(item["name"])
-            if comp != None and namespace == item["namespace"]:
-                self.addFactsFor(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
+            if comp is not None and namespace == item["namespace"]:
+                self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
 
+    # pylint: disable=no-self-use, too-many-return-statements
     def comp(self, name):
+        ''' Does a comparison to evaluate the logging component '''
         if name.startswith("logging-curator-ops"):
             return "curator_ops"
         elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
@@ -266,38 +297,44 @@
         else:
             return None
 
-    def do(self):
-        self.factsForRoutes(self.namespace)
-        self.factsForDaemonsets(self.namespace)
-        self.factsForDeploymentConfigs(self.namespace)
-        self.factsForServices(self.namespace)
-        self.factsForConfigMaps(self.namespace)
-        self.factsForSCCs(self.namespace)
-        self.factsForOAuthClients(self.namespace)
-        self.factsForClusterRoleBindings(self.namespace)
-        self.factsForRoleBindings(self.namespace)
-        self.factsForSecrets(self.namespace)
-        self.factsForPvcs(self.namespace)
+    def build_facts(self):
+        ''' Builds the logging facts and returns them '''
+        self.facts_for_routes(self.namespace)
+        self.facts_for_daemonsets(self.namespace)
+        self.facts_for_deploymentconfigs(self.namespace)
+        self.facts_for_services(self.namespace)
+        self.facts_for_configmaps(self.namespace)
+        self.facts_for_sccs()
+        self.facts_for_oauthclients(self.namespace)
+        self.facts_for_clusterrolebindings(self.namespace)
+        self.facts_for_rolebindings(self.namespace)
+        self.facts_for_secrets(self.namespace)
+        self.facts_for_pvcs(self.namespace)
         return self.facts
 
+
 def main():
-    module = AnsibleModule(
+    ''' The main method '''
+    module = AnsibleModule(   # noqa: F405
         argument_spec=dict(
-            admin_kubeconfig = {"required": True, "type": "str"},
-            oc_bin = {"required": True, "type": "str"},
-            openshift_logging_namespace = {"required": True, "type": "str"}
+            admin_kubeconfig={"required": True, "type": "str"},
+            oc_bin={"required": True, "type": "str"},
+            openshift_logging_namespace={"required": True, "type": "str"}
        ),
-        supports_check_mode = False
+        supports_check_mode=False
     )
     try:
-        cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],module.params['openshift_logging_namespace'])
+        cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
+                                    module.params['openshift_logging_namespace'])
         module.exit_json(
-                ansible_facts = {"openshift_logging_facts": cmd.do() }
+            ansible_facts={"openshift_logging_facts": cmd.build_facts()}
         )
-    except Exception as e:
-        module.fail_json(msg=str(e))
+    # ignore broad-except error to avoid stack trace to ansible user
+    # pylint: disable=broad-except
+    except Exception as error:
+        module.fail_json(msg=str(error))
+
 
-from ansible.module_utils.basic import *
 if __name__ == '__main__':
     main()
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index a95c84901..7050e51db 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -12,4 +12,4 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
-  - role: openshift_facts
+- role: openshift_facts
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index c4a70114d..1829acaee 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -21,9 +21,9 @@
     secret_key_file: "{{component}}_key"
     secret_cert_file: "{{component}}_cert"
     secrets:
-     - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
-     - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
-     - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+      - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+      - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+      - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
     secret_keys: ["ca", "cert", "key"]
   with_items:
     - kibana
@@ -41,11 +41,11 @@
   vars:
     secret_name: logging-kibana-proxy
     secrets:
-     - {key: oauth-secret, value: "{{oauth_secret}}"}
-     - {key: session-secret, value: "{{session_secret}}"}
-     - {key: server-key, value: "{{kibana_key_file}}"}
-     - {key: server-cert, value: "{{kibana_cert_file}}"}
-     - {key: server-tls, value: "{{server_tls_file}}"}
+      - {key: oauth-secret, value: "{{oauth_secret}}"}
+      - {key: session-secret, value: "{{session_secret}}"}
+      - {key: server-key, value: "{{kibana_key_file}}"}
+      - {key: server-cert, value: "{{kibana_cert_file}}"}
+      - {key: server-tls, value: "{{server_tls_file}}"}
     secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
     kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
     kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
@@ -63,8 +63,8 @@
     admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
     admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
   vars:
-     secret_name: logging-elasticsearch
-     secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+    secret_name: logging-elasticsearch
+    secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
   register: logging_es_secret
   when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
         secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index b1f8855c4..fbba46a35 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -5,7 +5,7 @@
     es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
     es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
   when:
-   - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
 
 - name: Init pool of DeploymentConfig names for Elasticsearch
   set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
@@ -16,7 +16,7 @@
     deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
   with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
   when:
-   - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
   check_mode: no
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
index 42e9f0eb6..125d3b8af 100644
--- a/roles/openshift_logging/tasks/scale.yaml
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -23,6 +23,6 @@
   retries: 30
   delay: 10
   when:
-    - not ansible_check_mode
-    - replica_count.stdout|int != desired
+  - not ansible_check_mode
+  - replica_count.stdout|int != desired
   changed_when: no
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index 4725820da..11662c446 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,10 +1,8 @@
-
+---
 openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-
 es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}"
 es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}"
 es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}"
-
 es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size/2 + 1}}"
 es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size - 1}}"
 es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}"
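For reference, a minimal sketch of how the renamed module and filters above are consumed from a playbook task; the task names and the oc_bin/admin_kubeconfig values below are illustrative placeholders, not taken from the repository:

- name: Gather OpenShift logging facts
  openshift_logging_facts:
    oc_bin: "oc"
    admin_kubeconfig: "/etc/origin/master/admin.kubeconfig"
    openshift_logging_namespace: "logging"

- name: Use the filters from filter_plugins/openshift_logging.py
  set_fact:
    # random_word(source, length) returns `length` random characters drawn from `source`
    es_suffix: "{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
    # entry_from_named_pair looks up a named item's content in a registered loop result
    ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"

The module exits with ansible_facts containing openshift_logging_facts, which is the structure queried by generate_secrets.yaml and install_elasticsearch.yaml above (for example, openshift_logging_facts.elasticsearch.deploymentconfigs).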
