diff --git a/Readmd.md b/Readmd.md
new file mode 100644
index 0000000..e24a47d
--- /dev/null
+++ b/Readmd.md
@@ -0,0 +1,24 @@
+
+# Kubernetes on AWS with Terraform
+
+This repository helps you spin up an AWS environment and create a Kubernetes cluster on top of it.
+
+- Prerequisites
+ - AWS account details
+ - Ansible on your local machine
+ - Terraform on your local machine
+
+### Usage
+
+Update the AWS account details in the Terraform variables file (`variabels.tf`), then run the command below to install the Kubernetes cluster on AWS:
+
+```
+bash k8scluster.sh
+```
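+
+Once the script finishes, you can run a quick sanity check from the master node. This is a minimal sketch; it assumes the `k8s.yaml` playbook has already copied the admin kubeconfig to `$HOME/.kube/config` on the master (it does this by default), and the key path and hostname below are placeholders for your own values:
+
+```
+# log in to the master with the key pair referenced in the inventory
+ssh -i /path/to/your-key.pem ubuntu@<master-public-dns>
+
+# every master and worker should eventually report Ready
+kubectl get nodes
+
+# flannel, dns, and dashboard pods should be Running
+kubectl get pods --all-namespaces
+```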
+
+To tear down the AWS environment and the Kubernetes cluster, run:
+
+```
+cd terraform
+terraform destroy -auto-approve
+```
+
diff --git a/ansible/README.md b/ansible/README.md
new file mode 100644
index 0000000..fdce9cc
--- /dev/null
+++ b/ansible/README.md
@@ -0,0 +1,39 @@
+# Install a multi-node Kubernetes cluster with Ansible playbooks
+
+- Prerequisites
+  - SSH trust from your local VM to the remote hosts, or a remote-host private key. For example, to set up root SSH trust:
+
+```
+# generate a local key pair
+ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+
+# prepare each remote host (run the commands over SSH rather than piping ssh output into a local sudo)
+ssh anguda@$host "sudo -S mkdir -p /root/.ssh"
+ssh anguda@$host "sudo -S touch /root/.ssh/authorized_keys"
+ssh anguda@$host "sudo -S apt install git ansible vim sshpass openssh-server -y"
+
+# append the local public key to the remote root's authorized_keys
+cat /root/.ssh/id_rsa.pub | sshpass -p k8s123 ssh root@$host "cat >> /root/.ssh/authorized_keys"
+```
+
+This directory helps you install a Kubernetes cluster with Ansible playbooks. Make sure to pass the inventory file to each playbook.
+
+- Example inventory:
+
+```
+[k8s-masters]
+54.219.223.243 ansible_ssh_host=54.219.223.243 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+
+[k8s-workers]
+52.52.238.67 ansible_ssh_host=52.52.238.67 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+52.8.50.178 ansible_ssh_host=52.8.50.178 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+```
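+
+Before running the playbooks, you can confirm that Ansible can reach every host in the inventory (the top-level `k8scluster.sh` wrapper performs the same check):
+
+```
+ansible -m ping -i inventory all
+```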
+
+### Usage
+
+First, run `prerequisites.yaml` against the inventory to install all components (Docker plus kubeadm, kubelet, and kubectl):
+
+```
+ansible-playbook prerequisites.yaml -i inventory
+```
+
+Then run `k8s.yaml` to bootstrap the Kubernetes cluster with kubeadm:
+
+```
+ansible-playbook k8s.yaml -i inventory
+```
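+
+Once the play completes, the workers should have joined the master. As a quick check, assuming the playbook has copied the kubeconfig to your home directory on the master, run there:
+
+```
+kubectl get nodes
+```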
+
diff --git a/ansible/inventory b/ansible/inventory
new file mode 100644
index 0000000..77ebadd
--- /dev/null
+++ b/ansible/inventory
@@ -0,0 +1,7 @@
+
+[k8s-masters]
+ec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+
+[k8s-workers]
+ec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+ec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
diff --git a/ansible/k8s.yaml b/ansible/k8s.yaml
new file mode 100644
index 0000000..5884331
--- /dev/null
+++ b/ansible/k8s.yaml
@@ -0,0 +1,116 @@
+- hosts: k8s-masters
+ become: True
+ tasks:
+
+ - name: Reset Kubernetes component
+ shell: "kubeadm reset --force"
+ register: reset_cluster
+
+ - name: remove etcd directory
+ ignore_errors: yes
+ shell: "{{ item }}"
+ with_items:
+ - rm -rf /var/lib/etcd
+ - rm -rf $HOME/.kube
+
+ - name: Initialize the Kubernetes cluster using kubeadm
+ command: kubeadm init --pod-network-cidr=10.244.0.0/16 --v 9
+ register: kubeadm
+
+ - debug: msg={{ kubeadm.stdout_lines }}
+
+ - name: Create kube directory
+ file:
+ path: $HOME/.kube
+ state: directory
+
+ - name: Copy kubeconfig to home
+ shell: |
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+ - name: Install networking plugin to kubernetes cluster
+ command: "kubectl apply -f {{ item }}"
+ with_items:
+ - https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+ - https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml
+
+ - name: Grant cluster-admin to the kubernetes-dashboard service account
+ command: kubectl create clusterrolebinding kubernetes-dashboard -n kube-system --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard
+
+ - name: Run kubectl proxy
+ # note: this exposes the API proxy on all interfaces; restrict --address for anything beyond a demo
+ shell: nohup kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' >/dev/null 2>&1 &
+
+ - name: Generate join token
+ shell: kubeadm token create --print-join-command
+ register: kubeadm_join_cmd
+
+ - set_fact:
+ kubeadm_join: "{{ kubeadm_join_cmd.stdout }}"
+
+ - debug: var=kubeadm_join
+
+ - name: Store join command
+ action: copy content="{{ kubeadm_join }}" dest="/etc/kubernetes/kubeadm-join.command"
+
+ - name: Fetch the join command from the master to the local machine
+ fetch:
+ src: /etc/kubernetes/kubeadm-join.command
+ dest: /tmp/kubeadm-join.command
+ flat: yes
+
+- hosts: k8s-workers
+ become: true
+ vars:
+ kubeadm_join: "{{ lookup('file', '/tmp/kubeadm-join.command') }}"
+ tasks:
+
+ - name: Copy Kubeadm join
+ copy:
+ src: /tmp/kubeadm-join.command
+ dest: /tmp/kubeadm-join.command
+
+ - name: Reset Kubernetes component
+ shell: "kubeadm reset --force"
+ ignore_errors: yes
+
+ - name: remove kubernetes directory
+ shell: "/bin/rm -rf /etc/kubernetes"
+ ignore_errors: yes
+
+ - name: Run kubeadm join
+ shell: "{{ kubeadm_join }} --ignore-preflight-errors=swap"
+
+- hosts: k8s-masters
+ become: true
+ tasks:
+ - name: Get Node name
+ shell: "kubectl get nodes | grep -v master | awk '{print $1}' | grep -v NAME"
+ register: node_name
+
+ - debug: var=node_name
+
+ - name: Label the node
+ shell: "kubectl label node {{ item }} node-role.kubernetes.io/node="
+ with_items: "{{ node_name.stdout_lines }}"
+
+ - name: "Check if Helm is installed"
+ shell: command -v helm >/dev/null 2>&1
+ register: helm_exists
+ ignore_errors: yes
+
+ - name: "Install Helm"
+ command: "{{ item }}"
+ args:
+ warn: false
+ with_items:
+ - curl -O https://get.helm.sh/helm-v3.1.1-linux-amd64.tar.gz
+ - tar -xvzf helm-v3.1.1-linux-amd64.tar.gz
+ - cp linux-amd64/helm /usr/local/bin/
+ - cp linux-amd64/helm /usr/bin/
+ - rm -rf helm-v3.1.1-linux-amd64.tar.gz linux-amd64
+
+ when: helm_exists.rc > 0
diff --git a/ansible/prerequisites.yaml b/ansible/prerequisites.yaml
new file mode 100644
index 0000000..ed578b9
--- /dev/null
+++ b/ansible/prerequisites.yaml
@@ -0,0 +1,146 @@
+- name: Define hosts
+ hosts: all
+ become: true
+ tasks:
+ - name: Update apt cache and upgrade installed packages
+ become: true
+ become_user: root
+ apt: update_cache=yes upgrade=yes
+ ignore_errors: yes
+
+ - name: Add the Kubernetes apt signing key for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt_key:
+ url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
+ state: present
+
+ - name: Adding Kubernetes apt repository for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt_repository:
+ repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
+ state: present
+ filename: kubernetes
+
+ - name: Install Kubernetes components for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt:
+ name: ['apt-transport-https', 'curl', 'ca-certificates', 'gnupg-agent' ,'software-properties-common', 'kubelet=1.15.3-00', 'kubeadm=1.15.3-00', 'kubectl=1.15.3-00']
+ state: present
+
+ - name: Check whether a Kubernetes cluster is already running
+ shell: kubectl cluster-info
+ register: k8sup
+ ignore_errors: yes
+
+ - name: Add Docker GPG key for Ubuntu
+ when: "ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt_key: url=https://download.docker.com/linux/ubuntu/gpg
+
+ - name: Add Docker APT repository for Ubuntu
+ when: "ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt_repository:
+ repo: deb [arch=amd64] https://download.docker.com/linux/{{ansible_distribution|lower}} {{ansible_distribution_release}} stable
+
+ - name: Install Docker-CE Engine on Ubuntu
+ when: " ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt:
+ name: [ 'docker-ce=5:19.03.1~3-0~ubuntu-bionic' ]
+ state: present
+ update_cache: yes
+
+ - name: Creating a Kubernetes repository file for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ file:
+ path: /etc/yum.repos.d/kubernetes.repo
+ state: touch
+
+ - name: Adding repository details in Kubernetes repo file for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ blockinfile:
+ path: /etc/yum.repos.d/kubernetes.repo
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=0
+ repo_gpgcheck=0
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+ - name: Installing required packages for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ yum:
+ name: ['bind-utils', 'yum-utils', 'device-mapper-persistent-data', 'lvm2', 'telnet', 'kubelet-1.15.5', 'kubeadm-1.15.5', 'kubectl-1.15.5', 'firewalld', 'curl']
+ state: present
+
+
+ - name: "Configuring Docker-CE repo for RHEL/CentOS"
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ get_url:
+ url: https://download.docker.com/linux/centos/docker-ce.repo
+ dest: /etc/yum.repos.d/docker-ce.repo
+ mode: 0644
+
+ - name: Install Docker-CE Engine on RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ args:
+ warn: false
+ shell: yum install docker-ce -y
+
+ - name: SetEnforce for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ ignore_errors: yes
+ command: "setenforce 0"
+
+ - name: SELinux for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ args:
+ warn: false
+ command: sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
+
+ - name: Enable Firewall Service for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+ ignore_errors: yes
+
+ - name: Allow Network Ports in Firewalld for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ firewalld:
+ port: "{{ item }}"
+ state: enabled
+ permanent: yes
+ immediate: yes
+ with_items:
+ - "6443/tcp"
+ - "10250/tcp"
+
+
+ - name: Remove swapfile from /etc/fstab
+ when: "'running' not in k8sup.stdout"
+ mount:
+ name: "{{ item }}"
+ fstype: swap
+ state: absent
+ with_items:
+ - swap
+ - none
+
+ - name: Disable swap
+ when: "'running' not in k8sup.stdout"
+ command: swapoff -a
+
+ - name: Starting and enabling the required services
+ when: "'running' not in k8sup.stdout"
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ ignore_errors: yes
+ with_items:
+ - docker
+ - kubelet
+
diff --git a/k8scluster.sh b/k8scluster.sh
new file mode 100644
index 0000000..5916bf5
--- /dev/null
+++ b/k8scluster.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# provision the AWS instances with Terraform
+cd terraform
+terraform init
+terraform plan
+terraform apply -auto-approve
+
+# render the Ansible inventory from the Terraform output
+terraform output inventory > ../ansible/inventory
+
+echo "Waiting for the AWS instances to come up..."
+sleep 60
+
+# check connectivity, then install the cluster
+cd ../ansible
+ansible -m ping -i inventory all
+ansible-playbook -i inventory prerequisites.yaml
+ansible-playbook -i inventory k8s.yaml
+
diff --git a/terraform/Readmd.md b/terraform/Readmd.md
new file mode 100644
index 0000000..e24a47d
--- /dev/null
+++ b/terraform/Readmd.md
@@ -0,0 +1,24 @@
+# Kubernetes on AWS with Terraform
+
+This repository helps you spin up an AWS environment and create a Kubernetes cluster on top of it.
+
+- Prerequisites
+ - AWS account details
+ - Ansible on your local machine
+ - Terraform on your local machine
+
+### Usage
+
+Update the AWS account details in the Terraform variables file (`variabels.tf`), then run the command below to install the Kubernetes cluster on AWS:
+
+```
+bash k8scluster.sh
+```
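+
+Once the script finishes, you can run a quick sanity check from the master node. This is a minimal sketch; it assumes the `k8s.yaml` playbook has already copied the admin kubeconfig to `$HOME/.kube/config` on the master (it does this by default), and the key path and hostname below are placeholders for your own values:
+
+```
+# log in to the master with the key pair referenced in the inventory
+ssh -i /path/to/your-key.pem ubuntu@<master-public-dns>
+
+# every master and worker should eventually report Ready
+kubectl get nodes
+
+# flannel, dns, and dashboard pods should be Running
+kubectl get pods --all-namespaces
+```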
+
+To tear down the AWS environment and the Kubernetes cluster, run:
+
+```
+cd terraform
+terraform destroy -auto-approve
+```
+
diff --git a/terraform/Readme.md b/terraform/Readme.md
new file mode 100644
index 0000000..9bff2c0
--- /dev/null
+++ b/terraform/Readme.md
@@ -0,0 +1,39 @@
+# Terraform on AWS
+
+For a multi-node Kubernetes cluster, we first bring up the nodes with Terraform.
+
+- Prerequisites
+ - aws_access_key
+ - aws_secret_key
+ - aws_keypair_name
+
+Make sure to update these values in `variabels.tf` so Terraform can access your AWS account.
+
+If you want to change other details, such as the AWS AMI or the instance types, edit `variabels.tf` as well.
+
+### Usage
+
+First, initialize Terraform to download the required provider plugins:
+
+```
+terraform init
+```
+
+Next, review the Terraform plan with the command below:
+
+```
+terraform plan
+```
+
+Once the plan looks good, apply it to your AWS account:
+
+```
+terraform apply -auto-approve
+```
+
+To create an Ansible inventory, run the command below. If you want to change the inventory format, modify the `output.tf` file.
+
+```
+terraform output inventory > ../ansible/inventory
+```
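+
+The rendered inventory groups masters and workers, one host per line; your hostnames, IPs, and key path will differ:
+
+```
+[k8s-masters]
+ec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+
+[k8s-workers]
+ec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+```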
+
diff --git a/terraform/ansible/README.md b/terraform/ansible/README.md
new file mode 100644
index 0000000..fdce9cc
--- /dev/null
+++ b/terraform/ansible/README.md
@@ -0,0 +1,39 @@
+# Install a multi-node Kubernetes cluster with Ansible playbooks
+
+- Prerequisites
+  - SSH trust from your local VM to the remote hosts, or a remote-host private key. For example, to set up root SSH trust:
+
+```
+# generate a local key pair
+ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+
+# prepare each remote host (run the commands over SSH rather than piping ssh output into a local sudo)
+ssh anguda@$host "sudo -S mkdir -p /root/.ssh"
+ssh anguda@$host "sudo -S touch /root/.ssh/authorized_keys"
+ssh anguda@$host "sudo -S apt install git ansible vim sshpass openssh-server -y"
+
+# append the local public key to the remote root's authorized_keys
+cat /root/.ssh/id_rsa.pub | sshpass -p k8s123 ssh root@$host "cat >> /root/.ssh/authorized_keys"
+```
+
+This directory helps you install a Kubernetes cluster with Ansible playbooks. Make sure to pass the inventory file to each playbook.
+
+- Example inventory:
+
+```
+[k8s-masters]
+54.219.223.243 ansible_ssh_host=54.219.223.243 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+
+[k8s-workers]
+52.52.238.67 ansible_ssh_host=52.52.238.67 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+52.8.50.178 ansible_ssh_host=52.8.50.178 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+```
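+
+Before running the playbooks, you can confirm that Ansible can reach every host in the inventory (the top-level `k8scluster.sh` wrapper performs the same check):
+
+```
+ansible -m ping -i inventory all
+```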
+
+### Usage
+
+First, run `prerequisites.yaml` against the inventory to install all components (Docker plus kubeadm, kubelet, and kubectl):
+
+```
+ansible-playbook prerequisites.yaml -i inventory
+```
+
+Then run `k8s.yaml` to bootstrap the Kubernetes cluster with kubeadm:
+
+```
+ansible-playbook k8s.yaml -i inventory
+```
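+
+Once the play completes, the workers should have joined the master. As a quick check, assuming the playbook has copied the kubeconfig to your home directory on the master, run there:
+
+```
+kubectl get nodes
+```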
+
diff --git a/terraform/ansible/inventory b/terraform/ansible/inventory
new file mode 100644
index 0000000..77ebadd
--- /dev/null
+++ b/terraform/ansible/inventory
@@ -0,0 +1,7 @@
+
+[k8s-masters]
+ec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+
+[k8s-workers]
+ec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
+ec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
diff --git a/terraform/ansible/k8s.yaml b/terraform/ansible/k8s.yaml
new file mode 100644
index 0000000..5884331
--- /dev/null
+++ b/terraform/ansible/k8s.yaml
@@ -0,0 +1,116 @@
+- hosts: k8s-masters
+ become: True
+ tasks:
+
+ - name: Reset Kubernetes component
+ shell: "kubeadm reset --force"
+ register: reset_cluster
+
+ - name: remove etcd directory
+ ignore_errors: yes
+ shell: "{{ item }}"
+ with_items:
+ - rm -rf /var/lib/etcd
+ - rm -rf $HOME/.kube
+
+ - name: Initialize the Kubernetes cluster using kubeadm
+ command: kubeadm init --pod-network-cidr=10.244.0.0/16 --v 9
+ register: kubeadm
+
+ - debug: msg={{ kubeadm.stdout_lines }}
+
+ - name: Create kube directory
+ file:
+ path: $HOME/.kube
+ state: directory
+
+ - name: Copy kubeconfig to home
+ shell: |
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+ - name: Install networking plugin to kubernetes cluster
+ command: "kubectl apply -f {{ item }}"
+ with_items:
+ - https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+ - https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml
+ - https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml
+
+ - name: Grant cluster-admin to the kubernetes-dashboard service account
+ command: kubectl create clusterrolebinding kubernetes-dashboard -n kube-system --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard
+
+ - name: Run kubectl proxy
+ # note: this exposes the API proxy on all interfaces; restrict --address for anything beyond a demo
+ shell: nohup kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' >/dev/null 2>&1 &
+
+ - name: Generate join token
+ shell: kubeadm token create --print-join-command
+ register: kubeadm_join_cmd
+
+ - set_fact:
+ kubeadm_join: "{{ kubeadm_join_cmd.stdout }}"
+
+ - debug: var=kubeadm_join
+
+ - name: Store join command
+ action: copy content="{{ kubeadm_join }}" dest="/etc/kubernetes/kubeadm-join.command"
+
+ - name: Fetch the join command from the master to the local machine
+ fetch:
+ src: /etc/kubernetes/kubeadm-join.command
+ dest: /tmp/kubeadm-join.command
+ flat: yes
+
+- hosts: k8s-workers
+ become: true
+ vars:
+ kubeadm_join: "{{ lookup('file', '/tmp/kubeadm-join.command') }}"
+ tasks:
+
+ - name: Copy Kubeadm join
+ copy:
+ src: /tmp/kubeadm-join.command
+ dest: /tmp/kubeadm-join.command
+
+ - name: Reset Kubernetes component
+ shell: "kubeadm reset --force"
+ ignore_errors: yes
+
+ - name: remove kubernetes directory
+ shell: "/bin/rm -rf /etc/kubernetes"
+ ignore_errors: yes
+
+ - name: Run kubeadm join
+ shell: "{{ kubeadm_join }} --ignore-preflight-errors=swap"
+
+- hosts: k8s-masters
+ become: true
+ tasks:
+ - name: Get Node name
+ shell: "kubectl get nodes | grep -v master | awk '{print $1}' | grep -v NAME"
+ register: node_name
+
+ - debug: var=node_name
+
+ - name: Label the node
+ shell: "kubectl label node {{ item }} node-role.kubernetes.io/node="
+ with_items: "{{ node_name.stdout_lines }}"
+
+ - name: "Check if Helm is installed"
+ shell: command -v helm >/dev/null 2>&1
+ register: helm_exists
+ ignore_errors: yes
+
+ - name: "Install Helm"
+ command: "{{ item }}"
+ args:
+ warn: false
+ with_items:
+ - curl -O https://get.helm.sh/helm-v3.1.1-linux-amd64.tar.gz
+ - tar -xvzf helm-v3.1.1-linux-amd64.tar.gz
+ - cp linux-amd64/helm /usr/local/bin/
+ - cp linux-amd64/helm /usr/bin/
+ - rm -rf helm-v3.1.1-linux-amd64.tar.gz linux-amd64
+
+ when: helm_exists.rc > 0
diff --git a/terraform/ansible/prerequisites.yaml b/terraform/ansible/prerequisites.yaml
new file mode 100644
index 0000000..ed578b9
--- /dev/null
+++ b/terraform/ansible/prerequisites.yaml
@@ -0,0 +1,146 @@
+- name: Define hosts
+ hosts: all
+ become: true
+ tasks:
+ - name: Update apt cache and upgrade installed packages
+ become: true
+ become_user: root
+ apt: update_cache=yes upgrade=yes
+ ignore_errors: yes
+
+ - name: Add the Kubernetes apt signing key for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt_key:
+ url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
+ state: present
+
+ - name: Adding Kubernetes apt repository for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt_repository:
+ repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
+ state: present
+ filename: kubernetes
+
+ - name: Install Kubernetes components for Ubuntu
+ when: "ansible_distribution == 'Ubuntu'"
+ apt:
+ name: ['apt-transport-https', 'curl', 'ca-certificates', 'gnupg-agent' ,'software-properties-common', 'kubelet=1.15.3-00', 'kubeadm=1.15.3-00', 'kubectl=1.15.3-00']
+ state: present
+
+ - name: Check whether a Kubernetes cluster is already running
+ shell: kubectl cluster-info
+ register: k8sup
+ ignore_errors: yes
+
+ - name: Add Docker GPG key for Ubuntu
+ when: "ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt_key: url=https://download.docker.com/linux/ubuntu/gpg
+
+ - name: Add Docker APT repository for Ubuntu
+ when: "ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt_repository:
+ repo: deb [arch=amd64] https://download.docker.com/linux/{{ansible_distribution|lower}} {{ansible_distribution_release}} stable
+
+ - name: Install Docker-CE Engine on Ubuntu
+ when: " ansible_distribution == 'Ubuntu' and 'running' not in k8sup.stdout"
+ apt:
+ name: [ 'docker-ce=5:19.03.1~3-0~ubuntu-bionic' ]
+ state: present
+ update_cache: yes
+
+ - name: Creating a Kubernetes repository file for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ file:
+ path: /etc/yum.repos.d/kubernetes.repo
+ state: touch
+
+ - name: Adding repository details in Kubernetes repo file for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ blockinfile:
+ path: /etc/yum.repos.d/kubernetes.repo
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=0
+ repo_gpgcheck=0
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+ - name: Installing required packages for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS']"
+ yum:
+ name: ['bind-utils', 'yum-utils', 'device-mapper-persistent-data', 'lvm2', 'telnet', 'kubelet-1.15.5', 'kubeadm-1.15.5', 'kubectl-1.15.5', 'firewalld', 'curl']
+ state: present
+
+
+ - name: "Configuring Docker-CE repo for RHEL/CentOS"
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ get_url:
+ url: https://download.docker.com/linux/centos/docker-ce.repo
+ dest: /etc/yum.repos.d/docker-ce.repo
+ mode: 0644
+
+ - name: Install Docker-CE Engine on RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ args:
+ warn: false
+ shell: yum install docker-ce -y
+
+ - name: SetEnforce for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ ignore_errors: yes
+ command: "setenforce 0"
+
+ - name: SELinux for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ args:
+ warn: false
+ command: sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
+
+ - name: Enable Firewall Service for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+ ignore_errors: yes
+
+ - name: Allow Network Ports in Firewalld for RHEL/CentOS
+ when: "ansible_distribution in ['RedHat', 'CentOS'] and 'running' not in k8sup.stdout"
+ firewalld:
+ port: "{{ item }}"
+ state: enabled
+ permanent: yes
+ immediate: yes
+ with_items:
+ - "6443/tcp"
+ - "10250/tcp"
+
+
+ - name: Remove swapfile from /etc/fstab
+ when: "'running' not in k8sup.stdout"
+ mount:
+ name: "{{ item }}"
+ fstype: swap
+ state: absent
+ with_items:
+ - swap
+ - none
+
+ - name: Disable swap
+ when: "'running' not in k8sup.stdout"
+ command: swapoff -a
+
+ - name: Starting and enabling the required services
+ when: "'running' not in k8sup.stdout"
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ ignore_errors: yes
+ with_items:
+ - docker
+ - kubelet
+
diff --git a/terraform/aws.tf b/terraform/aws.tf
new file mode 100644
index 0000000..cd8f645
--- /dev/null
+++ b/terraform/aws.tf
@@ -0,0 +1,45 @@
+provider "aws" {
+ access_key = var.access_key
+ secret_key = var.secret_key
+ region = var.region
+}
+
+resource "aws_instance" "master" {
+ ami = var.ami
+ key_name = var.key_name
+ instance_type = var.master_instance_type
+ count = var.master_count
+ tags = {
+ Name = "${var.master_tags}-${count.index}"
+ }
+}
+
+resource "aws_instance" "worker" {
+ ami = var.ami
+ key_name = var.key_name
+ instance_type = var.node_instance_type
+ count = var.worker_count
+ tags = {
+ Name = "${var.worker_tags}-${count.index}"
+ }
+}
+
+resource "aws_eip" "eip-master" {
+ count = var.master_count
+ instance = element(aws_instance.master.*.id,count.index)
+ vpc = true
+ tags = {
+ Name = "${var.master_tags}-${count.index}"
+ }
+}
+
+resource "aws_eip" "eip-worker" {
+ count = var.worker_count
+ instance = element(aws_instance.worker.*.id,count.index)
+ vpc = true
+ tags = {
+ Name = "${var.worker_tags}-${count.index}"
+ }
+
+}
+
diff --git a/terraform/k8scluster.sh b/terraform/k8scluster.sh
new file mode 100644
index 0000000..5916bf5
--- /dev/null
+++ b/terraform/k8scluster.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# provision the AWS instances with Terraform
+cd terraform
+terraform init
+terraform plan
+terraform apply -auto-approve
+
+# render the Ansible inventory from the Terraform output
+terraform output inventory > ../ansible/inventory
+
+echo "Waiting for the AWS instances to come up..."
+sleep 60
+
+# check connectivity, then install the cluster
+cd ../ansible
+ansible -m ping -i inventory all
+ansible-playbook -i inventory prerequisites.yaml
+ansible-playbook -i inventory k8s.yaml
+
diff --git a/terraform/output.tf b/terraform/output.tf
new file mode 100644
index 0000000..ff3eae5
--- /dev/null
+++ b/terraform/output.tf
@@ -0,0 +1,31 @@
+data "template_file" "masters_ansible" {
+ template = "$${host} ansible_ssh_host=$${ip} ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'"
+ count = var.master_count
+ vars = {
+ host = aws_eip.eip-master.*.public_dns[count.index]
+ ip = aws_eip.eip-master.*.public_ip[count.index]
+ }
+}
+
+data "template_file" "workers_ansible" {
+ template = "$${host} ansible_ssh_host=$${ip} ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'"
+ count = var.worker_count
+ vars = {
+ host = aws_eip.eip-worker.*.public_dns[count.index]
+ ip = aws_eip.eip-worker.*.public_ip[count.index]
+ }
+}
+
+
+data "template_file" "inventory" {
+ template = "\n[k8s-masters]\n$${masters}\n\n[k8s-workers]\n$${workers}"
+ vars = {
+ masters = join("\n", data.template_file.masters_ansible.*.rendered)
+ workers = join("\n", data.template_file.workers_ansible.*.rendered)
+
+ }
+}
+
+output "inventory" {
+ value = data.template_file.inventory.rendered
+}
diff --git a/terraform/terraform.tfstate b/terraform/terraform.tfstate
new file mode 100644
index 0000000..220d182
--- /dev/null
+++ b/terraform/terraform.tfstate
@@ -0,0 +1,422 @@
+{
+ "version": 4,
+ "terraform_version": "0.12.21",
+ "serial": 11,
+ "lineage": "800c963d-9650-2e81-5ef9-1db9bc31d7aa",
+ "outputs": {
+ "inventory": {
+ "value": "\n[k8s-masters]\nec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\n\n[k8s-workers]\nec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\nec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "type": "string"
+ }
+ },
+ "resources": [
+ {
+ "mode": "data",
+ "type": "template_file",
+ "name": "inventory",
+ "provider": "provider.template",
+ "instances": [
+ {
+ "schema_version": 0,
+ "attributes": {
+ "filename": null,
+ "id": "fc1c55650085f1db0c70cae9796a32e96e4cbf55680466a7b42875886a04a8c9",
+ "rendered": "\n[k8s-masters]\nec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\n\n[k8s-workers]\nec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\nec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "template": "\n[k8s-masters]\n${masters}\n\n[k8s-workers]\n${workers}",
+ "vars": {
+ "masters": "ec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "workers": "ec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\nec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'"
+ }
+ }
+ }
+ ]
+ },
+ {
+ "mode": "data",
+ "type": "template_file",
+ "name": "masters_ansible",
+ "each": "list",
+ "provider": "provider.template",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 0,
+ "attributes": {
+ "filename": null,
+ "id": "3f66e5aa5093b546a9873140de4c3ff366ae74243e05ad4a3b4998871d458ca8",
+ "rendered": "ec2-52-52-180-22.us-west-1.compute.amazonaws.com ansible_ssh_host=52.52.180.22 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "template": "${host} ansible_ssh_host=${ip} ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "vars": {
+ "host": "ec2-52-52-180-22.us-west-1.compute.amazonaws.com",
+ "ip": "52.52.180.22"
+ }
+ }
+ }
+ ]
+ },
+ {
+ "mode": "data",
+ "type": "template_file",
+ "name": "workers_ansible",
+ "each": "list",
+ "provider": "provider.template",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 0,
+ "attributes": {
+ "filename": null,
+ "id": "219e1114de87d7dfacb58b3352631c5c7a1ac64c34110af12c4988254eeba896",
+ "rendered": "ec2-13-57-111-53.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.111.53 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "template": "${host} ansible_ssh_host=${ip} ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "vars": {
+ "host": "ec2-13-57-111-53.us-west-1.compute.amazonaws.com",
+ "ip": "13.57.111.53"
+ }
+ }
+ },
+ {
+ "index_key": 1,
+ "schema_version": 0,
+ "attributes": {
+ "filename": null,
+ "id": "d930342ed62c8b20cb591112420d4a35e5da20ad02c05a338cc646e05208c3f2",
+ "rendered": "ec2-13-57-45-138.us-west-1.compute.amazonaws.com ansible_ssh_host=13.57.45.138 ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "template": "${host} ansible_ssh_host=${ip} ansible_ssh_port=22 ansible_ssh_user=ubuntu ansible_ssh_private_key_file=/Users/aguda/Downloads/AWS/awstest.pem ansible_ssh_extra_args='-o StrictHostKeyChecking=no'",
+ "vars": {
+ "host": "ec2-13-57-45-138.us-west-1.compute.amazonaws.com",
+ "ip": "13.57.45.138"
+ }
+ }
+ }
+ ]
+ },
+ {
+ "mode": "managed",
+ "type": "aws_eip",
+ "name": "eip-master",
+ "each": "list",
+ "provider": "provider.aws",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 0,
+ "attributes": {
+ "allocation_id": null,
+ "associate_with_private_ip": null,
+ "association_id": "eipassoc-0f3bc559cd345b14b",
+ "domain": "vpc",
+ "id": "eipalloc-05c973bca91e59368",
+ "instance": "i-099f9566ea30f2a87",
+ "network_interface": "eni-05f073c8bd3a1199b",
+ "private_dns": "ip-172-31-19-229.us-west-1.compute.internal",
+ "private_ip": "172.31.19.229",
+ "public_dns": "ec2-52-52-180-22.us-west-1.compute.amazonaws.com",
+ "public_ip": "52.52.180.22",
+ "public_ipv4_pool": "amazon",
+ "tags": {
+ "Name": "csbmaster-0"
+ },
+ "timeouts": null,
+ "vpc": true
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjoxODAwMDAwMDAwMDAsInJlYWQiOjkwMDAwMDAwMDAwMCwidXBkYXRlIjozMDAwMDAwMDAwMDB9fQ==",
+ "dependencies": [
+ "aws_instance.master"
+ ]
+ }
+ ]
+ },
+ {
+ "mode": "managed",
+ "type": "aws_eip",
+ "name": "eip-worker",
+ "each": "list",
+ "provider": "provider.aws",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 0,
+ "attributes": {
+ "allocation_id": null,
+ "associate_with_private_ip": null,
+ "association_id": "eipassoc-00bd3b6aca44e40b9",
+ "domain": "vpc",
+ "id": "eipalloc-03d9720c5ed3311c2",
+ "instance": "i-04a78d66fb88e386d",
+ "network_interface": "eni-01a9dbc989f5442b6",
+ "private_dns": "ip-172-31-18-76.us-west-1.compute.internal",
+ "private_ip": "172.31.18.76",
+ "public_dns": "ec2-13-57-111-53.us-west-1.compute.amazonaws.com",
+ "public_ip": "13.57.111.53",
+ "public_ipv4_pool": "amazon",
+ "tags": {
+ "Name": "csbworker-0"
+ },
+ "timeouts": null,
+ "vpc": true
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjoxODAwMDAwMDAwMDAsInJlYWQiOjkwMDAwMDAwMDAwMCwidXBkYXRlIjozMDAwMDAwMDAwMDB9fQ==",
+ "dependencies": [
+ "aws_instance.worker"
+ ]
+ },
+ {
+ "index_key": 1,
+ "schema_version": 0,
+ "attributes": {
+ "allocation_id": null,
+ "associate_with_private_ip": null,
+ "association_id": "eipassoc-003cdf28e3f655403",
+ "domain": "vpc",
+ "id": "eipalloc-0e4c77820870663a6",
+ "instance": "i-05ab26e03e0864d31",
+ "network_interface": "eni-00d082a1c05ad95d8",
+ "private_dns": "ip-172-31-28-18.us-west-1.compute.internal",
+ "private_ip": "172.31.28.18",
+ "public_dns": "ec2-13-57-45-138.us-west-1.compute.amazonaws.com",
+ "public_ip": "13.57.45.138",
+ "public_ipv4_pool": "amazon",
+ "tags": {
+ "Name": "csbworker-1"
+ },
+ "timeouts": null,
+ "vpc": true
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjoxODAwMDAwMDAwMDAsInJlYWQiOjkwMDAwMDAwMDAwMCwidXBkYXRlIjozMDAwMDAwMDAwMDB9fQ==",
+ "dependencies": [
+ "aws_instance.worker"
+ ]
+ }
+ ]
+ },
+ {
+ "mode": "managed",
+ "type": "aws_instance",
+ "name": "master",
+ "each": "list",
+ "provider": "provider.aws",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 1,
+ "attributes": {
+ "ami": "ami-03ba3948f6c37a4b0",
+ "arn": "arn:aws:ec2:us-west-1:599322425409:instance/i-099f9566ea30f2a87",
+ "associate_public_ip_address": true,
+ "availability_zone": "us-west-1b",
+ "cpu_core_count": 2,
+ "cpu_threads_per_core": 1,
+ "credit_specification": [
+ {
+ "cpu_credits": "standard"
+ }
+ ],
+ "disable_api_termination": false,
+ "ebs_block_device": [],
+ "ebs_optimized": false,
+ "ephemeral_block_device": [],
+ "get_password_data": false,
+ "hibernation": false,
+ "host_id": null,
+ "iam_instance_profile": "",
+ "id": "i-099f9566ea30f2a87",
+ "instance_initiated_shutdown_behavior": null,
+ "instance_state": "running",
+ "instance_type": "t2.medium",
+ "ipv6_address_count": 0,
+ "ipv6_addresses": [],
+ "key_name": "awstest",
+ "monitoring": false,
+ "network_interface": [],
+ "network_interface_id": null,
+ "password_data": "",
+ "placement_group": "",
+ "primary_network_interface_id": "eni-05f073c8bd3a1199b",
+ "private_dns": "ip-172-31-19-229.us-west-1.compute.internal",
+ "private_ip": "172.31.19.229",
+ "public_dns": "ec2-54-219-174-66.us-west-1.compute.amazonaws.com",
+ "public_ip": "54.219.174.66",
+ "root_block_device": [
+ {
+ "delete_on_termination": true,
+ "encrypted": false,
+ "iops": 100,
+ "kms_key_id": "",
+ "volume_id": "vol-0680879666235627c",
+ "volume_size": 8,
+ "volume_type": "gp2"
+ }
+ ],
+ "security_groups": [
+ "default"
+ ],
+ "source_dest_check": true,
+ "subnet_id": "subnet-7bed151d",
+ "tags": {
+ "Name": "csbmaster-0"
+ },
+ "tenancy": "default",
+ "timeouts": null,
+ "user_data": null,
+ "user_data_base64": null,
+ "volume_tags": {},
+ "vpc_security_group_ids": [
+ "sg-0476917b"
+ ]
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMCwidXBkYXRlIjo2MDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjEifQ=="
+ }
+ ]
+ },
+ {
+ "mode": "managed",
+ "type": "aws_instance",
+ "name": "worker",
+ "each": "list",
+ "provider": "provider.aws",
+ "instances": [
+ {
+ "index_key": 0,
+ "schema_version": 1,
+ "attributes": {
+ "ami": "ami-03ba3948f6c37a4b0",
+ "arn": "arn:aws:ec2:us-west-1:599322425409:instance/i-04a78d66fb88e386d",
+ "associate_public_ip_address": true,
+ "availability_zone": "us-west-1b",
+ "cpu_core_count": 2,
+ "cpu_threads_per_core": 1,
+ "credit_specification": [
+ {
+ "cpu_credits": "standard"
+ }
+ ],
+ "disable_api_termination": false,
+ "ebs_block_device": [],
+ "ebs_optimized": false,
+ "ephemeral_block_device": [],
+ "get_password_data": false,
+ "hibernation": false,
+ "host_id": null,
+ "iam_instance_profile": "",
+ "id": "i-04a78d66fb88e386d",
+ "instance_initiated_shutdown_behavior": null,
+ "instance_state": "running",
+ "instance_type": "t2.medium",
+ "ipv6_address_count": 0,
+ "ipv6_addresses": [],
+ "key_name": "awstest",
+ "monitoring": false,
+ "network_interface": [],
+ "network_interface_id": null,
+ "password_data": "",
+ "placement_group": "",
+ "primary_network_interface_id": "eni-01a9dbc989f5442b6",
+ "private_dns": "ip-172-31-18-76.us-west-1.compute.internal",
+ "private_ip": "172.31.18.76",
+ "public_dns": "ec2-13-56-163-33.us-west-1.compute.amazonaws.com",
+ "public_ip": "13.56.163.33",
+ "root_block_device": [
+ {
+ "delete_on_termination": true,
+ "encrypted": false,
+ "iops": 100,
+ "kms_key_id": "",
+ "volume_id": "vol-02cfdbb5acbea650e",
+ "volume_size": 8,
+ "volume_type": "gp2"
+ }
+ ],
+ "security_groups": [
+ "default"
+ ],
+ "source_dest_check": true,
+ "subnet_id": "subnet-7bed151d",
+ "tags": {
+ "Name": "csbworker-0"
+ },
+ "tenancy": "default",
+ "timeouts": null,
+ "user_data": null,
+ "user_data_base64": null,
+ "volume_tags": {},
+ "vpc_security_group_ids": [
+ "sg-0476917b"
+ ]
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMCwidXBkYXRlIjo2MDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjEifQ=="
+ },
+ {
+ "index_key": 1,
+ "schema_version": 1,
+ "attributes": {
+ "ami": "ami-03ba3948f6c37a4b0",
+ "arn": "arn:aws:ec2:us-west-1:599322425409:instance/i-05ab26e03e0864d31",
+ "associate_public_ip_address": true,
+ "availability_zone": "us-west-1b",
+ "cpu_core_count": 2,
+ "cpu_threads_per_core": 1,
+ "credit_specification": [
+ {
+ "cpu_credits": "standard"
+ }
+ ],
+ "disable_api_termination": false,
+ "ebs_block_device": [],
+ "ebs_optimized": false,
+ "ephemeral_block_device": [],
+ "get_password_data": false,
+ "hibernation": false,
+ "host_id": null,
+ "iam_instance_profile": "",
+ "id": "i-05ab26e03e0864d31",
+ "instance_initiated_shutdown_behavior": null,
+ "instance_state": "running",
+ "instance_type": "t2.medium",
+ "ipv6_address_count": 0,
+ "ipv6_addresses": [],
+ "key_name": "awstest",
+ "monitoring": false,
+ "network_interface": [],
+ "network_interface_id": null,
+ "password_data": "",
+ "placement_group": "",
+ "primary_network_interface_id": "eni-00d082a1c05ad95d8",
+ "private_dns": "ip-172-31-28-18.us-west-1.compute.internal",
+ "private_ip": "172.31.28.18",
+ "public_dns": "ec2-54-67-43-26.us-west-1.compute.amazonaws.com",
+ "public_ip": "54.67.43.26",
+ "root_block_device": [
+ {
+ "delete_on_termination": true,
+ "encrypted": false,
+ "iops": 100,
+ "kms_key_id": "",
+ "volume_id": "vol-0e9314817428218f0",
+ "volume_size": 8,
+ "volume_type": "gp2"
+ }
+ ],
+ "security_groups": [
+ "default"
+ ],
+ "source_dest_check": true,
+ "subnet_id": "subnet-7bed151d",
+ "tags": {
+ "Name": "csbworker-1"
+ },
+ "tenancy": "default",
+ "timeouts": null,
+ "user_data": null,
+ "user_data_base64": null,
+ "volume_tags": {},
+ "vpc_security_group_ids": [
+ "sg-0476917b"
+ ]
+ },
+ "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMCwidXBkYXRlIjo2MDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjEifQ=="
+ }
+ ]
+ }
+ ]
+}
diff --git a/terraform/variabels.tf b/terraform/variabels.tf
new file mode 100644
index 0000000..a88e7f7
--- /dev/null
+++ b/terraform/variabels.tf
@@ -0,0 +1,38 @@
+variable "access_key" {
+ default = "AKIAYXCTGPBA24LFMYUV"
+}
+variable "secret_key" {
+ default = "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"
+}
+variable "worker_count" {
+ default = 2
+}
+variable "master_count" {
+ default = 1
+}
+variable "key_name" {
+ default = "awstest"
+}
+variable "region" {
+ default = "us-west-1"
+}
+variable "ami" {
+ default = "ami-03ba3948f6c37a4b0"
+}
+variable "node_instance_type" {
+ default = "t2.medium"
+}
+
+variable "master_instance_type" {
+ default = "t2.medium"
+}
+variable "master_tags" {
+ default = "csbmaster"
+}
+
+variable "worker_tags" {
+ default = "csbworker"
+}
+variable "state" {
+ default = "running"
+}