author     Scott Dodson <sdodson@redhat.com>        2017-09-07 16:32:56 -0400
committer  GitHub <noreply@github.com>              2017-09-07 16:32:56 -0400
commit     7b1c455c01d10ab5aa804ad48a5b60ab53d6a0c8 (patch)
tree       b900625cbb8a97af4d2cf0d19d452bd643a9e0ec /roles/openshift_aws/tasks
parent     dc0e3d218ba953e1bc1525ef337f99677deee6c3 (diff)
parent     efe86b44bce679db38cca654818dc3837bb05f6a (diff)

Merge pull request #5211 from kwoodson/provisioning_fixes
Provisioning updates.
Diffstat (limited to 'roles/openshift_aws/tasks')
-rw-r--r--   roles/openshift_aws/tasks/ami_copy.yml           34
-rw-r--r--   roles/openshift_aws/tasks/build_ami.yml          48
-rw-r--r--   roles/openshift_aws/tasks/build_node_group.yml   34
-rw-r--r--   roles/openshift_aws/tasks/elb.yml                68
-rw-r--r--   roles/openshift_aws/tasks/iam_cert.yml           29
-rw-r--r--   roles/openshift_aws/tasks/launch_config.yml      45
-rw-r--r--   roles/openshift_aws/tasks/provision.yml          54
-rw-r--r--   roles/openshift_aws/tasks/provision_nodes.yml    66
-rw-r--r--   roles/openshift_aws/tasks/s3.yml                  7
-rw-r--r--   roles/openshift_aws/tasks/scale_group.yml        32
-rw-r--r--   roles/openshift_aws/tasks/seal_ami.yml           49
-rw-r--r--   roles/openshift_aws/tasks/security_group.yml     45
-rw-r--r--   roles/openshift_aws/tasks/ssh_keys.yml            8
-rw-r--r--   roles/openshift_aws/tasks/vpc.yml                52
14 files changed, 571 insertions, 0 deletions
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+    msg: "{{ item }} needs to be defined"
+  when: item is not defined
+  with_items:
+  - openshift_aws_ami_copy_src_ami
+  - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+  oo_iam_kms:
+    state: present
+    alias: "{{ openshift_aws_iam_kms_alias }}"
+    region: "{{ openshift_aws_region }}"
+  register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+  ec2_ami_copy:
+    name: "{{ openshift_aws_ami_copy_name }}"
+    region: "{{ openshift_aws_region }}"
+    source_region: "{{ openshift_aws_ami_copy_src_region }}"
+    source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+    encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+    kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+    wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+  register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with setfact
+  set_fact:
+    openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create a vpc
+  include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+  name: Create compute security_groups
+  include: security_group.yml
+
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: create instance for ami creation
+  ec2:
+    assign_public_ip: yes
+    region: "{{ openshift_aws_region }}"
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    group: "{{ openshift_aws_clusterid }}"
+    instance_type: m4.xlarge
+    vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+    image: "{{ openshift_aws_base_ami }}"
+    volumes:
+    - device_name: /dev/sdb
+      volume_type: gp2
+      volume_size: 100
+      delete_on_termination: true
+    wait: yes
+    exact_count: 1
+    count_tag:
+      Name: "{{ openshift_aws_base_ami_name }}"
+    instance_tags:
+      Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_use_custom_ami is '' then
+# we retrieve the latest build AMI.
+# Then set openshift_aws_ami to the ami.
+- when: openshift_aws_ami == ''
+  block:
+  - name: fetch recently created AMI
+    ec2_ami_find:
+      region: "{{ openshift_aws_region }}"
+      sort: creationDate
+      sort_order: descending
+      name: "{{ openshift_aws_ami_name }}*"
+      ami_tags: "{{ openshift_aws_ami_tags }}"
+      no_result_action: fail
+    register: amiout
+
+  - name: Set the openshift_aws_ami
+    set_fact:
+      openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+    when:
+    - "'results' in amiout"
+    - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups
+  name: "Create {{ openshift_aws_node_group_type }} security groups"
+  include: security_group.yml
+
+- when: openshift_aws_create_launch_config
+  name: "Create {{ openshift_aws_node_group_type }} launch config"
+  include: launch_config.yml
+
+- when: openshift_aws_create_scale_group
+  name: "Create {{ openshift_aws_node_group_type }} node group"
+  include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: debug
+  debug: var=vpcout
+
+- name: fetch the remote instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters: "{{ openshift_aws_elb_instance_filter }}"
+  register: instancesout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name:
+  debug:
+    msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+             if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+             else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+  ec2_elb_lb:
+    name: "{{ openshift_aws_elb_name }}"
+    state: present
+    security_group_names: "{{ openshift_aws_elb_security_groups }}"
+    idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+    region: "{{ openshift_aws_region }}"
+    subnets:
+    - "{{ subnetout.subnets[0].id }}"
+    health_check: "{{ openshift_aws_elb_health_check }}"
+    listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+    scheme: "{{ openshift_aws_elb_scheme }}"
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+  register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+  ec2_elb:
+    instance_id: "{{ item.id }}"
+    ec2_elbs: "{{ openshift_aws_elb_name }}"
+    state: present
+    region: "{{ openshift_aws_region }}"
+    wait: False
+  with_items: "{{ instancesout.instances }}"
+  ignore_errors: True
+  retries: 10
+  register: elb_call
+  until: elb_call|succeeded
+
+- debug:
+    msg: "{{ item }}"
+  with_items:
+  - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+  iam_cert23:
+    state: present
+    name: "{{ openshift_aws_iam_cert_name }}"
+    cert: "{{ openshift_aws_iam_cert_path }}"
+    key: "{{ openshift_aws_iam_cert_key_path }}"
+    cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+  register: elb_cert_chain
+  failed_when:
+  - "'failed' in elb_cert_chain"
+  - elb_cert_chain.failed
+  - "'msg' in elb_cert_chain"
+  - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+  - "'BotoServerError' in elb_cert_chain.msg"
+  when:
+  - openshift_aws_create_iam_cert | bool
+  - openshift_aws_iam_cert_path != ''
+  - openshift_aws_iam_cert_key_path != ''
+  - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+  set_fact:
+    openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+
+- name: wait for cert to propagate
+  pause:
+    seconds: 5
+  when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+    msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+  when:
+  - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+  ec2_group_facts:
+    filters:
+      group-name:
+      - "{{ openshift_aws_clusterid }}"  # default sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
+    region: "{{ openshift_aws_region }}"
+  register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+  ec2_lc:
+    name: "{{ openshift_aws_launch_config_name }}"
+    region: "{{ openshift_aws_region }}"
+    image_id: "{{ openshift_aws_ami }}"
+    instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+    user_data: |-
+      #cloud-config
+      {% if openshift_aws_node_group_type != 'master' %}
+      write_files:
+      - path: /root/csr_kubeconfig
+        owner: root:root
+        permissions: '0640'
+        content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+      - path: /root/openshift_settings
+        owner: root:root
+        permissions: '0640'
+        content:
+          openshift_type: "{{ openshift_aws_node_group_type }}"
+      runcmd:
+      - [ systemctl, enable, atomic-openshift-node]
+      - [ systemctl, start, atomic-openshift-node]
+      {% endif %}
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    ebs_optimized: False
+    volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+    assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create default vpc
+  include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+  name: create the iam_cert for elb certificate
+  include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+  name: create s3 bucket for registry
+  include: s3.yml
+
+- name: include scale group creation for master
+  include: build_node_group.yml
+
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": "{{ openshift_aws_node_group_type }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+    openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+    openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+  wait_for:
+    port: 22
+    host: "{{ item.public_ip_address }}"
+    timeout: 300
+    search_regex: OpenSSH
+  with_items: "{{ instancesout.instances }}"
+  when: openshift_aws_wait_for_ssh | bool
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": master
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+  slurp:
+    src: /etc/origin/master/bootstrap.kubeconfig
+  delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+  remote_user: root
+  register: bootstrap
+
+- name: set_fact for kubeconfig token
+  set_fact:
+    openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: infra
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: compute
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+  block:
+  - name: pause and allow for instances to scale before we query them
+    pause:
+      seconds: 10
+
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region }}"
+      filters:
+        "tag:clusterid": "{{ openshift_aws_clusterid }}"
+        "tag:host-type": node
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
+
+  - name: wait for ssh to become available
+    wait_for:
+      port: 22
+      host: "{{ item.public_ip_address }}"
+      timeout: 300
+      search_regex: OpenSSH
+    with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+  s3:
+    bucket: "{{ openshift_aws_s3_bucket_name }}"
+    mode: "{{ openshift_aws_s3_mode }}"
+    region: "{{ openshift_aws_region }}"
+  when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the subnet to use in scale group
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: Create the scale group
+  ec2_asg:
+    name: "{{ openshift_aws_scale_group_name }}"
+    launch_config_name: "{{ openshift_aws_launch_config_name }}"
+    health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+    health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+    min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+    max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+    desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+    region: "{{ openshift_aws_region }}"
+    termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+    tags:
+    - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_base_ami_name }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: bundle ami
+  ec2_ami:
+    instance_id: "{{ instancesout.instances.0.id }}"
+    region: "{{ openshift_aws_region }}"
+    state: present
+    description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+    name: "{{ openshift_aws_ami_name }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+    wait: yes
+  register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+  block:
+  - name: augment the encrypted ami tags with source-ami
+    set_fact:
+      source_tag:
+        source-ami: "{{ amioutput.image_id }}"
+
+  - name: copy the ami for encrypted disks
+    include: ami_copy.yml
+    vars:
+      openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+      openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+      # TODO: How does the kms alias get passed to ec2_ami_copy
+      openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+      openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+      # this option currently fails due to boto waiters
+      # when supported this need to be reapplied
+      #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+  ec2:
+    state: absent
+    region: "{{ openshift_aws_region }}"
+    instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_clusterid }}"
+  register: vpcout
+
+- name: Create default security group for cluster
+  ec2_group:
+    name: "{{ openshift_aws_node_security_groups.default.name }}"
+    description: "{{ openshift_aws_node_security_groups.default.desc }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+    rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
+  register: sg_default_created
+
+- name: create the node group sgs
+  ec2_group:
+    name: "{{ item.name}}"
+    description: "{{ item.desc }}"
+    rules: "{{ item.rules if 'rules' in item else [] }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: create the k8s sgs for the node group
+  ec2_group:
+    name: "{{ item.name }}_k8s"
+    description: "{{ item.desc }} for k8s"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: k8s_sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: tag sg groups with proper tags
+  ec2_tag:
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    resource: "{{ item.group_id }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/ssh_keys.yml b/roles/openshift_aws/tasks/ssh_keys.yml
new file mode 100644
index 000000000..f439ce74e
--- /dev/null
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -0,0 +1,8 @@
+---
+- name: Add the public keys for the users
+  ec2_key:
+    name: "{{ item.key_name }}"
+    key_material: "{{ item.pub_key }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ openshift_aws_users }}"
+  no_log: True
diff --git a/roles/openshift_aws/tasks/vpc.yml b/roles/openshift_aws/tasks/vpc.yml
new file mode 100644
index 000000000..ce2c8eac5
--- /dev/null
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -0,0 +1,52 @@
+---
+- name: Create AWS VPC
+  ec2_vpc_net:
+    state: present
+    cidr_block: "{{ openshift_aws_vpc.cidr }}"
+    dns_support: True
+    dns_hostnames: True
+    region: "{{ openshift_aws_region }}"
+    name: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_vpc_tags }}"
+  register: vpc
+
+- name: Sleep to avoid a race condition when creating the vpc
+  pause:
+    seconds: 5
+  when: vpc.changed
+
+- name: assign the vpc igw
+  ec2_vpc_igw:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+  register: igw
+
+- name: assign the vpc subnets
+  ec2_vpc_subnet:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    cidr: "{{ item.cidr }}"
+    az: "{{ item.az }}"
+    resource_tags:
+      Name: "{{ item.az }}"
+  with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+- name: Grab the route tables from our VPC
+  ec2_vpc_route_table_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      vpc-id: "{{ vpc.vpc.id }}"
+  register: route_table
+
+- name: update the route table in the vpc
+  ec2_vpc_route_table:
+    lookup: id
+    route_table_id: "{{ route_table.route_tables[0].id }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ openshift_aws_region }}"
+    tags:
+      Name: "{{ openshift_aws_vpc_name }}"
+    routes:
+    - dest: 0.0.0.0/0
+      gateway_id: igw
+  register: route_table_out
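
For context, these task files are intended to be driven by a playbook that sets the cluster id and region and then includes the role's entry points. Below is a minimal sketch of such a driver playbook; it is not part of this commit. The role name openshift_aws and the task file names provision.yml and provision_nodes.yml come from the diff above, while the play layout and the example values (mycluster, us-east-1) are illustrative assumptions.

---
# Hypothetical driver playbook (not part of this change): provision the VPC,
# ELBs, S3 bucket, and master scale group, then the infra/compute node groups.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    openshift_aws_clusterid: mycluster     # assumed example value
    openshift_aws_region: us-east-1        # assumed example value
  tasks:
  - name: provision cluster infrastructure and master scale group
    include_role:
      name: openshift_aws
      tasks_from: provision.yml

  - name: provision infra and compute node scale groups
    include_role:
      name: openshift_aws
      tasks_from: provision_nodes.yml

Note that provision_nodes.yml slurps /etc/origin/master/bootstrap.kubeconfig from a running master, so the second include only makes sense after the masters have been installed and that file exists.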