Diffstat (limited to 'roles')
-rw-r--r--  roles/lib_openshift_api/library/oc_deploymentconfig.py                                        | 377
-rw-r--r--  roles/lib_openshift_api/library/oc_secret.py (renamed from roles/lib_openshift_api/library/oc_secrets.py) | 419
-rw-r--r--  roles/lib_openshift_api/library/oc_service.py                                                 | 378
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                                              |   1
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml                                                      |  13
-rw-r--r--  roles/openshift_manageiq/vars/main.yml                                                        |   8
-rw-r--r--  roles/openshift_node/tasks/main.yml                                                           |   1
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2                                                |   3
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service                                  |   2
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml                                                |   3
10 files changed, 1008 insertions, 197 deletions
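This commit adds three generic Ansible modules under roles/lib_openshift_api (oc_deploymentconfig, oc_service, and oc_secret, the last renamed from oc_secrets.py), each built on a shared OpenShiftCLI wrapper that shells out to /usr/bin/oc. Based on the argument_spec definitions in the diff below, playbook tasks could call them roughly as sketched here; the namespaces, resource names, and file paths are placeholders for illustration and are not part of the commit:

# Sketch only: parameter names are taken from the modules' argument_spec,
# but every value shown is invented for this example.
- name: apply a service definition from a file on the host
  oc_service:
    namespace: default
    name: my-service
    service_file: /tmp/my-service.yaml
    input_type: yaml
    state: present

- name: create or update a secret from files already on the host
  oc_secret:
    namespace: default
    name: my-secret
    files:
    - /tmp/config.yml
    - /tmp/passwords.yml
    delete_after: true
    state: present

- name: list deploymentconfigs in a namespace
  oc_deploymentconfig:
    namespace: default
    state: list

As the usage comments removed from oc_secrets.py note, oc_secret expects any files it references to already be present on the remote host.
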
| diff --git a/roles/lib_openshift_api/library/oc_deploymentconfig.py b/roles/lib_openshift_api/library/oc_deploymentconfig.py new file mode 100644 index 000000000..fbdaa8e9c --- /dev/null +++ b/roles/lib_openshift_api/library/oc_deploymentconfig.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python +''' +  OpenShiftCLI class that wraps the oc commands in a subprocess +''' +import atexit +import json +import os +import shutil +import subprocess +import yaml + +class OpenShiftCLI(object): +    ''' Class to wrap the oc command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = kubeconfig + +    def replace(self, fname, force=False): +        '''return all pods ''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd = ['replace', '--force', '-f', fname] +        return self.oc_cmd(cmd) + +    def create(self, fname): +        '''return all pods ''' +        return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + +    def delete(self, resource, rname): +        '''return all pods ''' +        return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + +    def get(self, resource, rname=None): +        '''return a secret by name ''' +        cmd = ['get', resource, '-o', 'json', '-n', self.namespace] +        if rname: +            cmd.append(rname) + +        rval = self.oc_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if rval.has_key('items'): +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def oc_cmd(self, cmd, output=False): +        '''Base command for oc ''' +        #cmds = ['/usr/bin/oc', '--config', self.kubeconfig] +        cmds = ['/usr/bin/oc'] +        cmds.extend(cmd) + +        results = '' + +        if self.verbose: +            print ' '.join(cmds) + +        proc = subprocess.Popen(cmds, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env={'KUBECONFIG': self.kubeconfig}) +        proc.wait() +        if proc.returncode == 0: +            if output: +                try: +                    results = json.loads(proc.stdout.read()) +                except ValueError as err: +                    if "No JSON object could be decoded" in err.message: +                        results = err.message + +            if self.verbose: +                print proc.stderr.read() +                print results +                print + +            return {"returncode": proc.returncode, "results": results} + +        return {"returncode": proc.returncode, +                "stderr": proc.stderr.read(), +                "stdout": proc.stdout.read(), +                "results": {} +               } + +class Utils(object): +    ''' utilities for openshiftcli modules ''' +    @staticmethod +    def create_file(rname, data, ftype=None): +        ''' create a file in tmp with name and contents''' +        path = os.path.join('/tmp', rname) +        with open(path, 'w') as fds: +            if ftype == 'yaml': +                fds.write(yaml.dump(data, default_flow_style=False)) + +            elif ftype == 'json': +                
fds.write(json.dumps(data)) +            else: +                fds.write(data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [path]) +        return path + +    @staticmethod +    def create_files_from_contents(data): +        '''Turn an array of dict: filename, content into a files array''' +        files = [] + +        for sfile in data: +            path = Utils.create_file(sfile['path'], sfile['content']) +            files.append(path) + +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if result.has_key('metadata') and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file  ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            contents = yaml.load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def check_def_equal(user_def, result_def, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if not isinstance(user_def[key], list): +                    return False + +                # lists should be identical +                if value != user_def[key]: +                    return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if not isinstance(user_def[key], dict): +                    if debug: +                        print "dict returned false not instance of dict" +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print api_values +                        print user_values +                        print "keys are not equal in dict" +                    return False + +                result = Utils.check_def_equal(user_def[key], value, debug=debug) +                if not result: +                    if debug: +                        print "dict returned false" +                    return False + +            # Verify each key, value pair is the same +            else: +                if not user_def.has_key(key) or value != user_def[key]: +                    if debug: +                        print "value not equal; user_def does not have key" +                        print value +                        print user_def[key] +                    return False + +        return True + +class DeploymentConfig(OpenShiftCLI): +    ''' Class to wrap the oc command line tools +    ''' +    def __init__(self, +                 namespace, +                 dname=None, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(DeploymentConfig, self).__init__(namespace, kubeconfig) +        self.namespace = namespace +        self.name = dname +        self.kubeconfig = kubeconfig +        self.verbose = verbose + +    def get_dc(self): +        '''return a deploymentconfig by name ''' +        return self.get('dc', self.name) + +    def delete_dc(self): +        '''return all pods ''' +        return self.delete('dc', self.name) + +    def new_dc(self, dfile): +        '''Create a deploymentconfig ''' +        return self.create(dfile) + +    def update_dc(self, dfile, force=False): +        '''run update dc + +           This receives a list of file names and takes the first filename and calls replace. 
+        ''' +        return self.replace(dfile, force) + + +# pylint: disable=too-many-branches +def main(): +    ''' +    ansible oc module for deploymentconfig +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, type='str'), +            deploymentconfig_file=dict(default=None, type='str'), +            input_type=dict(default='yaml', choices=['yaml', 'json'], type='str'), +            delete_after=dict(default=False, type='bool'), +            content=dict(default=None, type='dict'), +            force=dict(default=False, type='bool'), +        ), +        mutually_exclusive=[["contents", "deploymentconfig_file"]], + +        supports_check_mode=True, +    ) +    occmd = DeploymentConfig(module.params['namespace'], +                             dname=module.params['name'], +                             kubeconfig=module.params['kubeconfig'], +                             verbose=module.params['debug']) + +    state = module.params['state'] + +    api_rval = occmd.get_dc() + +    ##### +    # Get +    ##### +    if state == 'list': +        module.exit_json(changed=False, results=api_rval['results'], state="list") + +    if not module.params['name']: +        module.fail_json(msg='Please specify a name when state is absent|present.') +    ######## +    # Delete +    ######## +    if state == 'absent': +        if not Utils.exists(api_rval['results'], module.params['name']): +            module.exit_json(changed=False, state="absent") + +        if module.check_mode: +            module.exit_json(change=False, msg='Would have performed a delete.') + +        api_rval = occmd.delete_dc() +        module.exit_json(changed=True, results=api_rval, state="absent") + + +    if state == 'present': +        if module.params['deploymentconfig_file']: +            dfile = module.params['deploymentconfig_file'] +        elif module.params['content']: +            dfile = Utils.create_file('dc', module.params['content']) +        else: +            module.fail_json(msg="Please specify content or deploymentconfig file.") + +        ######## +        # Create +        ######## +        if not Utils.exists(api_rval['results'], module.params['name']): + +            if module.check_mode: +                module.exit_json(change=False, msg='Would have performed a create.') + +            api_rval = occmd.new_dc(dfile) + +            # Remove files +            if module.params['deploymentconfig_file'] and module.params['delete_after']: +                Utils.cleanup([dfile]) + +            if api_rval['returncode'] != 0: +                module.fail_json(msg=api_rval) + +            module.exit_json(changed=True, results=api_rval, state="present") + +        ######## +        # Update +        ######## +        if Utils.check_def_equal(Utils.get_resource_file(dfile), api_rval['results'][0]): + +            # Remove files +            if module.params['deploymentconfig_file'] and module.params['delete_after']: +                Utils.cleanup([dfile]) + +            module.exit_json(changed=False, results=api_rval['results'], state="present") + +        if module.check_mode: +            module.exit_json(change=False, msg='Would have 
performed an update.') + +        api_rval = occmd.update_dc(dfile, force=module.params['force']) + +        # Remove files +        if module.params['deploymentconfig_file'] and module.params['delete_after']: +            Utils.cleanup([dfile]) + +        if api_rval['returncode'] != 0: +            module.fail_json(msg=api_rval) + + +        module.exit_json(changed=True, results=api_rval, state="present") + +    module.exit_json(failed=True, +                     changed=False, +                     results='Unknown state passed. %s' % state, +                     state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets.  This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_openshift_api/library/oc_secrets.py b/roles/lib_openshift_api/library/oc_secret.py index 841c14692..96a0f1db1 100644 --- a/roles/lib_openshift_api/library/oc_secrets.py +++ b/roles/lib_openshift_api/library/oc_secret.py @@ -1,72 +1,45 @@  #!/usr/bin/env python  ''' -module for openshift cloud secrets +  OpenShiftCLI class that wraps the oc commands in a subprocess  ''' -#   Examples: -# -#  # to initiate and use /etc/origin/master/admin.kubeconfig file for auth -#  - name: list secrets -#    oc_secrets: -#      state: list -#      namespace: default -# -#  # To get a specific secret named 'mysecret' -#  - name: list secrets -#    oc_secrets: -#      state: list -#      namespace: default -#      name: mysecret -# -#   # To create a secret: -#   # This module expects the user to place the files on the remote server and pass them in. -#  - name: create a secret from file -#    oc_secrets: -#      state: present -#      namespace: default -#      name: mysecret -#      files: -#      - /tmp/config.yml -#      - /tmp/passwords.yml -#      delete_after: False - -#   # To create a secret: -#   # This module expects the user to place the files on the remote server and pass them in. 
-#  - name: create a secret from content -#    oc_secrets: -#      state: present -#      namespace: default -#      name: mysecret -#      contents: -#      - path: /tmp/config.yml -#        content: "value=True\n" -#      - path: /tmp/passwords.yml -#        content: "test1\ntest2\ntest3\ntest4\n" -# - +import atexit +import json  import os  import shutil -import json -import atexit +import subprocess +import yaml -class OpenShiftOC(object): -    ''' Class to wrap the oc command line tools -    ''' +class OpenShiftCLI(object): +    ''' Class to wrap the oc command line tools '''      def __init__(self,                   namespace, -                 secret_name=None,                   kubeconfig='/etc/origin/master/admin.kubeconfig',                   verbose=False):          ''' Constructor for OpenshiftOC '''          self.namespace = namespace -        self.name = secret_name          self.verbose = verbose          self.kubeconfig = kubeconfig -    def get_secrets(self): +    def replace(self, fname, force=False): +        '''return all pods ''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd = ['replace', '--force', '-f', fname] +        return self.oc_cmd(cmd) + +    def create(self, fname): +        '''return all pods ''' +        return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + +    def delete(self, resource, rname): +        '''return all pods ''' +        return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + +    def get(self, resource, rname=None):          '''return a secret by name ''' -        cmd = ['get', 'secrets', '-o', 'json', '-n', self.namespace] -        if self.name: -            cmd.append(self.name) +        cmd = ['get', resource, '-o', 'json', '-n', self.namespace] +        if rname: +            cmd.append(rname)          rval = self.oc_cmd(cmd, output=True) @@ -78,65 +51,9 @@ class OpenShiftOC(object):          return rval -    def delete_secret(self): -        '''return all pods ''' -        return self.oc_cmd(['delete', 'secrets', self.name, '-n', self.namespace]) - -    def secret_new(self, files): -        '''Create a secret with  all pods ''' -        secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] -        cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name] -        cmd.extend(secrets) - -        return self.oc_cmd(cmd) - -    @staticmethod -    def create_files_from_contents(data): -        '''Turn an array of dict: filename, content into a files array''' -        files = [] -        for sfile in data: -            with open(sfile['path'], 'w') as fds: -                fds.write(sfile['content']) -            files.append(sfile['path']) - -        # Register cleanup when module is done -        atexit.register(OpenShiftOC.cleanup, files) -        return files - -    def update_secret(self, files, force=False): -        '''run update secret - -           This receives a list of file names and converts it into a secret. -           The secret is then written to disk and passed into the `oc replace` command. 
-        ''' -        secret = self.prep_secret(files) -        if secret['returncode'] != 0: -            return secret - -        sfile_path = '/tmp/%s' % secret['results']['metadata']['name'] -        with open(sfile_path, 'w') as sfd: -            sfd.write(json.dumps(secret['results'])) - -        cmd = ['replace', '-f', sfile_path] -        if force: -            cmd = ['replace', '--force', '-f', sfile_path] - -        atexit.register(OpenShiftOC.cleanup, [sfile_path]) - -        return self.oc_cmd(cmd) - -    def prep_secret(self, files): -        ''' return what the secret would look like if created -            This is accomplished by passing -ojson.  This will most likely change in the future -        ''' -        secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] -        cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name] -        cmd.extend(secrets) - -        return self.oc_cmd(cmd, output=True) -      def oc_cmd(self, cmd, output=False):          '''Base command for oc ''' +        #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]          cmds = ['/usr/bin/oc']          cmds.extend(cmd) @@ -171,6 +88,36 @@ class OpenShiftOC(object):                  "results": {}                 } +class Utils(object): +    ''' utilities for openshiftcli modules ''' +    @staticmethod +    def create_file(rname, data, ftype=None): +        ''' create a file in tmp with name and contents''' +        path = os.path.join('/tmp', rname) +        with open(path, 'w') as fds: +            if ftype == 'yaml': +                fds.write(yaml.dump(data, default_flow_style=False)) + +            elif ftype == 'json': +                fds.write(json.dumps(data)) +            else: +                fds.write(data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [path]) +        return path + +    @staticmethod +    def create_files_from_contents(data): +        '''Turn an array of dict: filename, content into a files array''' +        files = [] + +        for sfile in data: +            path = Utils.create_file(sfile['path'], sfile['content']) +            files.append(path) + +        return files +      @staticmethod      def cleanup(files):          '''Clean up on exit ''' @@ -182,83 +129,167 @@ class OpenShiftOC(object):                      os.remove(sfile) -def exists(results, _name): -    ''' Check to see if the results include the name ''' -    if not results: +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + + +        if Utils.find_result(results, _name): +            return True +          return False -    if find_result(results, _name): +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if result.has_key('metadata') and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file  ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            contents = yaml.load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    # Disabling too-many-branches.  
This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def check_def_equal(user_def, result_def, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  ''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if not isinstance(user_def[key], list): +                    return False + +                # lists should be identical +                if value != user_def[key]: +                    return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if not isinstance(user_def[key], dict): +                    if debug: +                        print "dict returned false not instance of dict" +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print api_values +                        print user_values +                        print "keys are not equal in dict" +                    return False + +                result = Utils.check_def_equal(user_def[key], value, debug=debug) +                if not result: +                    if debug: +                        print "dict returned false" +                    return False + +            # Verify each key, value pair is the same +            else: +                if not user_def.has_key(key) or value != user_def[key]: +                    if debug: +                        print "value not equal; user_def does not have key" +                        print value +                        print user_def[key] +                    return False +          return True -    return False - -def find_result(results, _name): -    ''' Find the specified result by name''' -    rval = None -    for result in results: -        #print "%s == %s" % (result['metadata']['name'], name) -        if result.has_key('metadata') and result['metadata']['name'] == _name: -            rval = result -            break - -    return rval - -# Disabling too-many-branches.  This is a yaml dictionary comparison function -# pylint: disable=too-many-branches,too-many-return-statements -def check_def_equal(user_def, result_def, debug=False): -    ''' Given a user defined definition, compare it with the results given back by our query.  
''' - -    # Currently these values are autogenerated and we do not need to check them -    skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] - -    for key, value in result_def.items(): -        if key in skip: -            continue - -        # Both are lists -        if isinstance(value, list): -            if not isinstance(user_def[key], list): -                return False - -            # lists should be identical -            if value != user_def[key]: -                return False - -        # recurse on a dictionary -        elif isinstance(value, dict): -            if not isinstance(user_def[key], dict): -                if debug: -                    print "dict returned false not instance of dict" -                return False - -            # before passing ensure keys match -            api_values = set(value.keys()) - set(skip) -            user_values = set(user_def[key].keys()) - set(skip) -            if api_values != user_values: -                if debug: -                    print api_values -                    print user_values -                    print "keys are not equal in dict" -                return False - -            result = check_def_equal(user_def[key], value) -            if not result: -                if debug: -                    print "dict returned false" -                return False - -        # Verify each key, value pair is the same -        else: -            if not user_def.has_key(key) or value != user_def[key]: -                if debug: -                    print "value not equal; user_def does not have key" -                    print value -                    print user_def[key] -                return False +class Secret(OpenShiftCLI): +    ''' Class to wrap the oc command line tools +    ''' +    def __init__(self, +                 namespace, +                 secret_name=None, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(Secret, self).__init__(namespace, kubeconfig) +        self.namespace = namespace +        self.name = secret_name +        self.kubeconfig = kubeconfig +        self.verbose = verbose + +    def get_secrets(self): +        '''return a secret by name ''' +        return self.get('secrets', self.name) + +    def delete_secret(self): +        '''return all pods ''' +        return self.delete('secrets', self.name) + +    def secret_new(self, files=None, contents=None): +        '''Create a secret with  all pods ''' +        if not files: +            files = Utils.create_files_from_contents(contents) + +        secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] +        cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name] +        cmd.extend(secrets) + +        return self.oc_cmd(cmd) + +    def update_secret(self, files, force=False): +        '''run update secret + +           This receives a list of file names and converts it into a secret. +           The secret is then written to disk and passed into the `oc replace` command. 
+        ''' +        secret = self.prep_secret(files) +        if secret['returncode'] != 0: +            return secret + +        sfile_path = '/tmp/%s' % self.name +        with open(sfile_path, 'w') as sfd: +            sfd.write(json.dumps(secret['results'])) + +        atexit.register(Utils.cleanup, [sfile_path]) + +        return self.replace(sfile_path, force=force) + +    def prep_secret(self, files=None, contents=None): +        ''' return what the secret would look like if created +            This is accomplished by passing -ojson.  This will most likely change in the future +        ''' +        if not files: +            files = Utils.create_files_from_contents(contents) + +        secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files] +        cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name] +        cmd.extend(secrets) + +        return self.oc_cmd(cmd, output=True) -    return True +# pylint: disable=too-many-branches  def main():      '''      ansible oc module for secrets @@ -281,10 +312,10 @@ def main():          supports_check_mode=True,      ) -    occmd = OpenShiftOC(module.params['namespace'], -                        module.params['name'], -                        kubeconfig=module.params['kubeconfig'], -                        verbose=module.params['debug']) +    occmd = Secret(module.params['namespace'], +                   module.params['name'], +                   kubeconfig=module.params['kubeconfig'], +                   verbose=module.params['debug'])      state = module.params['state'] @@ -302,7 +333,7 @@ def main():      # Delete      ########      if state == 'absent': -        if not exists(api_rval['results'], module.params['name']): +        if not Utils.exists(api_rval['results'], module.params['name']):              module.exit_json(changed=False, state="absent")          if module.check_mode: @@ -316,39 +347,39 @@ def main():          if module.params['files']:              files = module.params['files']          elif module.params['contents']: -            files = OpenShiftOC.create_files_from_contents(module.params['contents']) +            files = Utils.create_files_from_contents(module.params['contents'])          else:              module.fail_json(msg='Either specify files or contents.')          ########          # Create          ######## -        if not exists(api_rval['results'], module.params['name']): +        if not Utils.exists(api_rval['results'], module.params['name']):              if module.check_mode:                  module.exit_json(change=False, msg='Would have performed a create.') -            api_rval = occmd.secret_new(files) +            api_rval = occmd.secret_new(module.params['files'], module.params['contents'])              # Remove files              if files and module.params['delete_after']: -                OpenShiftOC.cleanup(files) +                Utils.cleanup(files)              module.exit_json(changed=True, results=api_rval, state="present")          ########          # Update          ######## -        secret = occmd.prep_secret(files) +        secret = occmd.prep_secret(module.params['files'], module.params['contents'])          if secret['returncode'] != 0:              module.fail_json(msg=secret) -        if check_def_equal(secret['results'], api_rval['results'][0]): +        if Utils.check_def_equal(secret['results'], api_rval['results'][0]):              # Remove files              if files and module.params['delete_after']: -                
OpenShiftOC.cleanup(files) +                Utils.cleanup(files)              module.exit_json(changed=False, results=secret['results'], state="present") @@ -358,8 +389,8 @@ def main():          api_rval = occmd.update_secret(files, force=module.params['force'])          # Remove files -        if files and module.params['delete_after']: -            OpenShiftOC.cleanup(files) +        if secret and module.params['delete_after']: +            Utils.cleanup(files)          if api_rval['returncode'] != 0:              module.fail_json(msg=api_rval) diff --git a/roles/lib_openshift_api/library/oc_service.py b/roles/lib_openshift_api/library/oc_service.py new file mode 100644 index 000000000..e7bd2514e --- /dev/null +++ b/roles/lib_openshift_api/library/oc_service.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python +''' +  OpenShiftCLI class that wraps the oc commands in a subprocess +''' +import atexit +import json +import os +import shutil +import subprocess +import yaml + +class OpenShiftCLI(object): +    ''' Class to wrap the oc command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = kubeconfig + +    def replace(self, fname, force=False): +        '''return all pods ''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd = ['replace', '--force', '-f', fname] +        return self.oc_cmd(cmd) + +    def create(self, fname): +        '''return all pods ''' +        return self.oc_cmd(['create', '-f', fname, '-n', self.namespace]) + +    def delete(self, resource, rname): +        '''return all pods ''' +        return self.oc_cmd(['delete', resource, rname, '-n', self.namespace]) + +    def get(self, resource, rname=None): +        '''return a secret by name ''' +        cmd = ['get', resource, '-o', 'json', '-n', self.namespace] +        if rname: +            cmd.append(rname) + +        rval = self.oc_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if rval.has_key('items'): +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def oc_cmd(self, cmd, output=False): +        '''Base command for oc ''' +        #cmds = ['/usr/bin/oc', '--config', self.kubeconfig] +        cmds = ['/usr/bin/oc'] +        cmds.extend(cmd) + +        results = '' + +        if self.verbose: +            print ' '.join(cmds) + +        proc = subprocess.Popen(cmds, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env={'KUBECONFIG': self.kubeconfig}) +        proc.wait() +        if proc.returncode == 0: +            if output: +                try: +                    results = json.loads(proc.stdout.read()) +                except ValueError as err: +                    if "No JSON object could be decoded" in err.message: +                        results = err.message + +            if self.verbose: +                print proc.stderr.read() +                print results +                print + +            return {"returncode": proc.returncode, "results": results} + +        return {"returncode": proc.returncode, +                "stderr": proc.stderr.read(), +                "stdout": 
proc.stdout.read(), +                "results": {} +               } + +class Utils(object): +    ''' utilities for openshiftcli modules ''' +    @staticmethod +    def create_file(rname, data, ftype=None): +        ''' create a file in tmp with name and contents''' +        path = os.path.join('/tmp', rname) +        with open(path, 'w') as fds: +            if ftype == 'yaml': +                fds.write(yaml.dump(data, default_flow_style=False)) + +            elif ftype == 'json': +                fds.write(json.dumps(data)) +            else: +                fds.write(data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [path]) +        return path + +    @staticmethod +    def create_files_from_contents(data): +        '''Turn an array of dict: filename, content into a files array''' +        files = [] + +        for sfile in data: +            path = Utils.create_file(sfile['path'], sfile['content']) +            files.append(path) + +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if result.has_key('metadata') and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file  ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            contents = yaml.load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def check_def_equal(user_def, result_def, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace'] + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if not isinstance(user_def[key], list): +                    return False + +                # lists should be identical +                if value != user_def[key]: +                    return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if not isinstance(user_def[key], dict): +                    if debug: +                        print "dict returned false not instance of dict" +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print api_values +                        print user_values +                        print "keys are not equal in dict" +                    return False + +                result = Utils.check_def_equal(user_def[key], value, debug=debug) +                if not result: +                    if debug: +                        print "dict returned false" +                    return False + +            # Verify each key, value pair is the same +            else: +                if not user_def.has_key(key) or value != user_def[key]: +                    if debug: +                        print "value not equal; user_def does not have key" +                        print value +                        print user_def[key] +                    return False + +        return True + +class Service(OpenShiftCLI): +    ''' Class to wrap the oc command line tools +    ''' +    def __init__(self, +                 namespace, +                 service_name=None, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(Service, self).__init__(namespace, kubeconfig) +        self.namespace = namespace +        self.name = service_name +        self.verbose = verbose +        self.kubeconfig = kubeconfig + +    def create_service(self, sfile): +        ''' create the service ''' +        return self.create(sfile) + +    def get_services(self): +        '''return a secret by name ''' +        return self.get('services', self.name) + +    def delete_service(self): +        '''return all pods ''' +        return self.delete('service', self.name) + +    def update_service(self, sfile, force=False): +        '''run update service + +           This receives a list of file names and converts it into a secret. +           The secret is then written to disk and passed into the `oc replace` command. 
+        ''' +        return self.replace(sfile, force=force) + + +# pylint: disable=too-many-branches +def main(): +    ''' +    ansible oc module for services +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, type='str'), +            service_file=dict(default=None, type='str'), +            input_type=dict(default='yaml', +                            choices=['json', 'yaml'], +                            type='str'), +            delete_after=dict(default=False, type='bool'), +            contents=dict(default=None, type='list'), +            force=dict(default=False, type='bool'), +        ), +        mutually_exclusive=[["contents", "service_file"]], + +        supports_check_mode=True, +    ) +    occmd = Service(module.params['namespace'], +                    module.params['name'], +                    kubeconfig=module.params['kubeconfig'], +                    verbose=module.params['debug']) + +    state = module.params['state'] + +    api_rval = occmd.get_services() + +    ##### +    # Get +    ##### +    if state == 'list': +        module.exit_json(changed=False, results=api_rval['results'], state="list") + +    if not module.params['name']: +        module.fail_json(msg='Please specify a name when state is absent|present.') +    ######## +    # Delete +    ######## +    if state == 'absent': +        if not Utils.exists(api_rval['results'], module.params['name']): +            module.exit_json(changed=False, state="absent") + +        if module.check_mode: +            module.exit_json(change=False, msg='Would have performed a delete.') + +        api_rval = occmd.delete_service() +        module.exit_json(changed=True, results=api_rval, state="absent") + + +    if state == 'present': +        if module.params['service_file']: +            sfile = module.params['service_file'] +        elif module.params['contents']: +            sfile = Utils.create_files_from_contents(module.params['contents']) +        else: +            module.fail_json(msg='Either specify files or contents.') + +        ######## +        # Create +        ######## +        if not Utils.exists(api_rval['results'], module.params['name']): + +            if module.check_mode: +                module.exit_json(change=False, msg='Would have performed a create.') + +            api_rval = occmd.create_service(sfile) + +            # Remove files +            if sfile and module.params['delete_after']: +                Utils.cleanup([sfile]) + +            module.exit_json(changed=True, results=api_rval, state="present") + +        ######## +        # Update +        ######## +        sfile_contents = Utils.get_resource_file(sfile, module.params['input_type']) +        if Utils.check_def_equal(sfile_contents, api_rval['results'][0]): + +            # Remove files +            if module.params['service_file'] and module.params['delete_after']: +                Utils.cleanup([sfile]) + +            module.exit_json(changed=False, results=api_rval['results'][0], state="present") + +        if module.check_mode: +            module.exit_json(change=False, msg='Would have performed an update.') + +        api_rval = occmd.update_service(sfile, 
force=module.params['force']) + +        # Remove files +        if sfile and module.params['delete_after']: +            Utils.cleanup([sfile]) + +        if api_rval['returncode'] != 0: +            module.fail_json(msg=api_rval) + + +        module.exit_json(changed=True, results=api_rval, state="present") + +    module.exit_json(failed=True, +                     changed=False, +                     results='Unknown state passed. %s' % state, +                     state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets.  This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 263daf210..30e29787a 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1407,6 +1407,7 @@ class OpenShiftFacts(object):          if 'node' in roles:              defaults['node'] = dict(labels={}, annotations={},                                      iptables_sync_period='5s', +                                    local_quota_per_fsgroup="",                                      set_node_ip=False)          if 'docker' in roles: diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index d2ff1b4b7..2a651df65 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -18,7 +18,7 @@    failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"    changed_when: osmiq_create_mi_project.rc == 0 -- name: Create Service Account +- name: Create Admin Service Account    shell: >      echo {{ manageiq_service_account | to_json | quote }} |      {{ openshift.common.client_binary }} create @@ -29,6 +29,17 @@    failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"    changed_when: osmiq_create_service_account.rc == 0 +- name: Create Image Inspector Service Account +  shell: > +    echo {{ manageiq_image_inspector_service_account | to_json | quote }} | +    {{ openshift.common.client_binary }} create +    -n management-infra +    --config={{manage_iq_tmp_conf}} +    -f - +  register: osmiq_create_service_account +  failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0" +  changed_when: osmiq_create_service_account.rc == 0 +  - name: Create Cluster Role    shell: >      echo {{ manageiq_cluster_role | to_json | quote }} | diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 77e1c304b..69ee2cb4c 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -15,6 +15,12 @@ manageiq_service_account:      metadata:        name: management-admin +manageiq_image_inspector_service_account: +    apiVersion: v1 +    kind: ServiceAccount +    metadata: +      name: inspector-admin +  manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig  manage_iq_tasks: @@ -22,3 +28,5 @@ manage_iq_tasks:      - policy add-role-to-user -n management-infra management-infra-admin -z management-admin      - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin      - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin +    - policy add-cluster-role-to-user 
system:image-puller system:serviceaccount:management-infra:inspector-admin +    - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 4b5832ab7..ca1e26459 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -31,6 +31,7 @@        node_image: "{{ osn_image | default(None) }}"        ovs_image: "{{ osn_ovs_image | default(None) }}"        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" +      local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"  # We have to add tuned-profiles in the same transaction otherwise we run into depsolving  # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 67975d372..28cb1ea26 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -38,3 +38,6 @@ volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes  proxyArguments:    proxy-mode:       - {{ openshift.node.proxy_mode }} +volumeConfig: +  localQuota: +    perFSGroup: {{ openshift.node.local_quota_per_fsgroup }} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 53b1d6230..65d2291bb 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -12,7 +12,7 @@ Wants={{ openshift.common.service_type }}-master.service  [Service]  EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node  ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn  -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn  -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v 
/dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node  SyslogIdentifier={{ openshift.common.service_type }}-node diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index f34fa7b74..5dd28d52a 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -9,7 +9,8 @@  - name: create the service account    shell: >         echo {{ lookup('template', '../templates/serviceaccount.j2') -               | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }}  create -f - +       | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} +       -n {{ openshift_serviceaccounts_namespace }} create -f -    when: item.1.rc != 0    with_together:    - openshift_serviceaccounts_names | 
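
Beyond the new modules, the node hunks wire a local_quota_per_fsgroup fact (fed by an openshift_node_local_quota_per_fsgroup inventory variable) through openshift_facts and into node.yaml via the node.yaml.v1.j2 template. A minimal sketch of the end-to-end effect, assuming an arbitrary 512Mi quota that is not taken from the commit:

# Inventory or group_vars entry (illustrative value, not from this change):
openshift_node_local_quota_per_fsgroup: 512Mi

# Fragment that node.yaml.v1.j2 would then render into node.yaml:
volumeConfig:
  localQuota:
    perFSGroup: 512Mi

When the inventory variable is unset, openshift_facts now defaults the fact to an empty string, so the volumeConfig block is still rendered with an empty perFSGroup. The same change set also bind-mounts /dev into the containerized node service, presumably so quota enforcement can reach the underlying block devices.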
