more real world examples; fix scanner when scanning empty character in lexer

This commit is contained in:
mhoffm
2021-06-21 19:40:27 +02:00
parent 3bc4153bc2
commit a7f23cc579
2020 changed files with 64141 additions and 10630 deletions

View File

@@ -0,0 +1,9 @@
# Remote (Terraform Cloud) backend: state lives in the "Jason-EKS"
# workspace of the "emea-se-playground-2019" organization.
terraform {
backend "remote" {
organization = "emea-se-playground-2019"
workspaces {
name = "Jason-EKS"
}
}
}

View File

@@ -0,0 +1,35 @@
# EKS cluster built from the public terraform-aws-modules/eks module.
module "eks" {
source = "terraform-aws-modules/eks/aws"
cluster_name = local.cluster_name
cluster_version = "1.18"
# NOTE(review): workers are placed on PUBLIC subnets here -- acceptable
# for a playground, but production clusters normally use private subnets.
subnets = module.vpc.public_subnets
vpc_id = module.vpc.vpc_id
workers_group_defaults = {
root_volume_type = "gp2"
}
# Two self-managed worker ASGs of different instance sizes.
worker_groups = [
{
name = "jasonb-test-group-1"
instance_type = "t2.small"
asg_desired_capacity = 2
additional_security_group_ids = [aws_security_group.external_connection.id]
},
{
name = "jasonb-test-group-2"
instance_type = "t2.medium"
additional_security_group_ids = [aws_security_group.external_connection.id]
asg_desired_capacity = 1
},
]
}
# Read back cluster endpoint/auth for configuring the kubernetes provider.
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}

View File

@@ -0,0 +1,12 @@
# Kubernetes provider
# https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster#optional-configure-terraform-kubernetes-provider
# To learn how to schedule deployments and services using the provider, go here: https://learn.hashicorp.com/terraform/kubernetes/deploy-nginx-kubernetes
# The Kubernetes provider is included in this file so the EKS module can complete successfully. Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
# You should absolutely **not** schedule deployments and services in this workspace. This keeps workspaces modular (one for provision EKS, another for scheduling Kubernetes resources) as per best practices.
# Auth uses the short-lived token from aws_eks_cluster_auth plus the
# cluster CA pulled from the aws_eks_cluster data source above.
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
token = data.aws_eks_cluster_auth.cluster.token
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
}

View File

@@ -0,0 +1,7 @@
# consul-template stanza: renders dns.tmpl to zone.db.
# NOTE(review): the commented-out token below looks like a real secret --
# even commented, it should be rotated and removed from version control.
# token= "a5c882f6-02b5-95ea-8698-8189b55abacc"
template {
source = "dns.tmpl"
destination = "zone.db"
# command= "named-checkconf && named-checkzone methridge.dev zone.db && rndc reload"
}

View File

@@ -0,0 +1,30 @@
# Provider version pins for the EKS playground configuration.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.20.0"
}
local = {
source = "hashicorp/local"
version = "2.0.0"
}
null = {
source = "hashicorp/null"
version = "3.0.0"
}
template = {
source = "hashicorp/template"
version = "2.2.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.1"
}
}
# Pinned to the 0.14.x CLI series.
required_version = "~> 0.14"
}

View File

@@ -0,0 +1,39 @@
provider "aws" {
region = "eu-west-1"
}
data "aws_availability_zones" "available" {}
locals {
cluster_name = "jasonb-eks"
}
# VPC for the EKS cluster; public subnets only in this variant.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.66.0"
name = "jasonb-vpc"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_dns_hostnames = true
}
# SSH access for worker nodes.
# NOTE(review): port 22 is open to 0.0.0.0/0 -- restrict to a trusted
# CIDR for anything beyond a throwaway demo.
resource "aws_security_group" "external_connection" {
name_prefix = "all_worker_management"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0",
]
}
}

View File

@@ -0,0 +1,43 @@
# EKS cluster (v1.20) from the public terraform-aws-modules/eks module;
# this variant uses private subnets for the workers.
module "eks" {
source = "terraform-aws-modules/eks/aws"
cluster_name = local.cluster_name
cluster_version = "1.20"
subnets = module.vpc.private_subnets
tags = {
Environment = "training"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
vpc_id = module.vpc.vpc_id
workers_group_defaults = {
root_volume_type = "gp2"
}
# Two self-managed worker ASGs, each with its own management SG.
worker_groups = [
{
name = "worker-group-1"
instance_type = "t2.small"
additional_userdata = "echo foo bar"
asg_desired_capacity = 2
additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
},
{
name = "worker-group-2"
instance_type = "t2.medium"
additional_userdata = "echo foo bar"
additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
asg_desired_capacity = 1
},
]
}
# Read back cluster endpoint/auth for the kubernetes provider.
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}

View File

@@ -0,0 +1,12 @@
# Kubernetes provider
# https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster#optional-configure-terraform-kubernetes-provider
# To learn how to schedule deployments and services using the provider, go here: https://learn.hashicorp.com/terraform/kubernetes/deploy-nginx-kubernetes
# The Kubernetes provider is included in this file so the EKS module can complete successfully. Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
# You should **not** schedule deployments and services in this workspace. This keeps workspaces modular (one for provision EKS, another for scheduling Kubernetes resources) as per best practices.
# Auth uses the short-lived token from aws_eks_cluster_auth plus the
# cluster CA pulled from the aws_eks_cluster data source above.
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
token = data.aws_eks_cluster_auth.cluster.token
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
}

View File

@@ -0,0 +1,34 @@
# Outputs surfaced to the workspace UI / downstream remote-state readers.
output "cluster_id" {
description = "EKS cluster ID."
value = module.eks.cluster_id
}
output "cluster_endpoint" {
description = "Endpoint for EKS control plane."
value = module.eks.cluster_endpoint
}
output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
output "kubectl_config" {
description = "kubectl config as generated by the module."
value = module.eks.kubeconfig
}
output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this EKS cluster."
value = module.eks.config_map_aws_auth
}
output "region" {
description = "AWS region"
value = var.region
}
output "cluster_name" {
description = "Kubernetes Cluster Name"
value = local.cluster_name
}

View File

@@ -0,0 +1,47 @@
# SSH management security groups for the two worker groups, plus a
# catch-all group covering all RFC1918 ranges.
resource "aws_security_group" "worker_group_mgmt_one" {
name_prefix = "worker_group_mgmt_one"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
]
}
}
resource "aws_security_group" "worker_group_mgmt_two" {
name_prefix = "worker_group_mgmt_two"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"192.168.0.0/16",
]
}
}
# SSH from any private (RFC1918) address range.
resource "aws_security_group" "all_worker_mgmt" {
name_prefix = "all_worker_management"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
}

View File

@@ -0,0 +1,36 @@
# Provider version pins; same as the other versions.tf in this commit
# but with the random provider added (used for the cluster-name suffix).
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.20.0"
}
random = {
source = "hashicorp/random"
version = "3.0.0"
}
local = {
source = "hashicorp/local"
version = "2.0.0"
}
null = {
source = "hashicorp/null"
version = "3.0.0"
}
template = {
source = "hashicorp/template"
version = "2.2.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.1"
}
}
required_version = "~> 0.14"
}

View File

@@ -0,0 +1,47 @@
# AWS region, overridable at plan/apply time; also exported by the
# "region" output elsewhere in this configuration.
variable "region" {
default = "eu-west-1"
description = "AWS region"
}
# Fix: the provider previously hard-coded "eu-west-1", silently ignoring
# var.region. Using the variable is backward-compatible (default is the
# same literal) and makes the variable actually effective.
provider "aws" {
region = var.region
}
data "aws_availability_zones" "available" {}
locals {
cluster_name = "education-eks-${random_string.suffix.result}"
}
# Random suffix keeps cluster names unique across repeated stand-ups.
resource "random_string" "suffix" {
length = 8
special = false
}
# VPC with public + private subnets; EKS-required kubernetes.io tags on
# both subnet tiers so the cluster can discover them for ELB placement.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.66.0"
name = "jasonb-vpc"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
# Single shared NAT gateway keeps costs down for a demo environment.
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
}
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}

View File

@@ -0,0 +1,196 @@
//--------------------------EMEA-SE_PLAYGROUND-2019-----------------------------------------
# Using a single workspace:
terraform {
backend "remote" {
hostname = "app.terraform.io"
organization = "emea-se-playground-2019"
workspaces {
name = "Jason-AWS-Demostack"
}
}
}
// Workspace Data
// Shared TLS root-CA material published by the tls-root-certificate workspace.
data "terraform_remote_state" "tls" {
backend = "remote"
config = {
hostname = "app.terraform.io"
organization = "emea-se-playground-2019"
workspaces = {
name = "tls-root-certificate"
}
} //config
}
// Route53 zone ids published by the Jason-DNS-Zone workspace.
data "terraform_remote_state" "dns" {
backend = "remote"
config = {
hostname = "app.terraform.io"
organization = "emea-se-playground-2019"
workspaces = {
name = "Jason-DNS-Zone"
}
} //network
}
//--------------------------------------------------------------------
# One aliased AWS provider per region so the three cluster modules
# below can each target a different region.
# NOTE(review): the in-provider "version" argument is deprecated from
# Terraform 0.13 onward; move these constraints into required_providers
# when this configuration is upgraded.
provider "aws" {
region = var.primary_region
alias = "primary"
version = "~> 2.0"
}
provider "aws" {
region = var.secondary_region
alias = "secondary"
version = "~> 2.0"
}
provider "aws" {
region = var.tertiary_region
alias = "tertiary"
version = "~> 2.0"
}
# Primary demostack cluster (always created -- its count gate is
# commented out below, unlike secondary/tertiary).
module "primarycluster" {
providers = {
aws.demostack = aws.primary
aws = aws.primary
}
source = "./modules"
# count = var.create_primary_cluster ? 1 : 0
owner = var.owner
region = var.primary_region
namespace = var.primary_namespace
public_key = var.public_key
servers = var.servers
workers = var.workers
vaultlicense = var.vaultlicense
consullicense = var.consullicense
nomadlicense = var.nomadlicense
enterprise = var.enterprise
consul_url = var.consul_url
consul_ent_url = var.consul_ent_url
fabio_url = var.fabio_url
nomad_url = var.nomad_url
nomad_ent_url = var.nomad_ent_url
cni_plugin_url = var.cni_plugin_url
vault_url = var.vault_url
vault_ent_url = var.vault_ent_url
created-by = var.created-by
sleep-at-night = var.sleep-at-night
TTL = var.TTL
vpc_cidr_block = var.vpc_cidr_block
cidr_blocks = var.cidr_blocks
instance_type_server = var.instance_type_server
instance_type_worker = var.instance_type_worker
zone_id = data.terraform_remote_state.dns.outputs.aws_sub_zone_id
run_nomad_jobs = var.run_nomad_jobs
host_access_ip = var.host_access_ip
primary_datacenter = var.primary_namespace
# EMEA-SE-PLAYGROUND
# CA material and shared cluster secrets come from the tls workspace.
ca_key_algorithm = data.terraform_remote_state.tls.outputs.ca_key_algorithm
ca_private_key_pem = data.terraform_remote_state.tls.outputs.ca_private_key_pem
ca_cert_pem = data.terraform_remote_state.tls.outputs.ca_cert_pem
consul_join_tag_value = "${var.namespace}-${data.terraform_remote_state.tls.outputs.consul_join_tag_value}"
consul_gossip_key = data.terraform_remote_state.tls.outputs.consul_gossip_key
consul_master_token = data.terraform_remote_state.tls.outputs.consul_master_token
nomad_gossip_key = data.terraform_remote_state.tls.outputs.nomad_gossip_key
}
# Optional secondary cluster, gated on create_secondary_cluster.
# NOTE(review): unlike primarycluster this module omits nomadlicense --
# confirm whether the secondary is intentionally unlicensed for Nomad.
module "secondarycluster" {
providers = {
aws.demostack = aws.secondary
aws = aws.secondary
}
source = "./modules"
count = var.create_secondary_cluster ? 1 : 0
owner = var.owner
region = var.secondary_region
namespace = var.secondary_namespace
public_key = var.public_key
servers = var.servers
workers = var.workers
vaultlicense = var.vaultlicense
consullicense = var.consullicense
enterprise = var.enterprise
consul_url = var.consul_url
consul_ent_url = var.consul_ent_url
fabio_url = var.fabio_url
nomad_url = var.nomad_url
nomad_ent_url = var.nomad_ent_url
cni_plugin_url = var.cni_plugin_url
vault_url = var.vault_url
vault_ent_url = var.vault_ent_url
created-by = var.created-by
sleep-at-night = var.sleep-at-night
TTL = var.TTL
vpc_cidr_block = var.vpc_cidr_block
cidr_blocks = var.cidr_blocks
instance_type_server = var.instance_type_server
instance_type_worker = var.instance_type_worker
zone_id = data.terraform_remote_state.dns.outputs.aws_sub_zone_id
run_nomad_jobs = var.run_nomad_jobs
host_access_ip = var.host_access_ip
primary_datacenter = var.primary_namespace
# EMEA-SE-PLAYGROUND
ca_key_algorithm = data.terraform_remote_state.tls.outputs.ca_key_algorithm
ca_private_key_pem = data.terraform_remote_state.tls.outputs.ca_private_key_pem
ca_cert_pem = data.terraform_remote_state.tls.outputs.ca_cert_pem
consul_join_tag_value = "${var.namespace}-${data.terraform_remote_state.tls.outputs.consul_join_tag_value}"
consul_gossip_key = data.terraform_remote_state.tls.outputs.consul_gossip_key
consul_master_token = data.terraform_remote_state.tls.outputs.consul_master_token
nomad_gossip_key = data.terraform_remote_state.tls.outputs.nomad_gossip_key
}
# Optional tertiary cluster.
module "tertiarycluster" {
providers = {
aws.demostack = aws.tertiary
aws = aws.tertiary
}
source = "./modules"
# NOTE(review): gated on create_secondary_cluster -- almost certainly a
# copy/paste slip; a create_tertiary_cluster variable was probably
# intended. Confirm against the variables file before changing.
count = var.create_secondary_cluster ? 1 : 0
owner = var.owner
region = var.tertiary_region
namespace = var.tertiary_namespace
public_key = var.public_key
servers = var.servers
workers = var.workers
vaultlicense = var.vaultlicense
consullicense = var.consullicense
enterprise = var.enterprise
consul_url = var.consul_url
consul_ent_url = var.consul_ent_url
fabio_url = var.fabio_url
nomad_url = var.nomad_url
nomad_ent_url = var.nomad_ent_url
cni_plugin_url = var.cni_plugin_url
vault_url = var.vault_url
vault_ent_url = var.vault_ent_url
created-by = var.created-by
sleep-at-night = var.sleep-at-night
TTL = var.TTL
vpc_cidr_block = var.vpc_cidr_block
cidr_blocks = var.cidr_blocks
instance_type_server = var.instance_type_server
instance_type_worker = var.instance_type_worker
zone_id = data.terraform_remote_state.dns.outputs.aws_sub_zone_id
run_nomad_jobs = var.run_nomad_jobs
host_access_ip = var.host_access_ip
# NOTE(review): primary and secondary pass var.primary_namespace here;
# this one passes var.primary_datacenter -- verify which is intended.
primary_datacenter = var.primary_datacenter
# EMEA-SE-PLAYGROUND
ca_key_algorithm = data.terraform_remote_state.tls.outputs.ca_key_algorithm
ca_private_key_pem = data.terraform_remote_state.tls.outputs.ca_private_key_pem
ca_cert_pem = data.terraform_remote_state.tls.outputs.ca_cert_pem
consul_join_tag_value = "${var.namespace}-${data.terraform_remote_state.tls.outputs.consul_join_tag_value}"
consul_gossip_key = data.terraform_remote_state.tls.outputs.consul_gossip_key
consul_master_token = data.terraform_remote_state.tls.outputs.consul_master_token
nomad_gossip_key = data.terraform_remote_state.tls.outputs.nomad_gossip_key
}

View File

@@ -0,0 +1,264 @@
terraform {
required_version = ">= 0.11.0"
}
// Look up the hosted zone so its domain name can be used in cert SANs.
// NOTE(review): "fdqn" is a typo of "fqdn", but renaming the data source
// would break every data.aws_route53_zone.fdqn reference in this module.
data "aws_route53_zone" "fdqn" {
zone_id = var.zone_id
}
# Latest Canonical Ubuntu 20.04 (Focal) HVM AMI for the current region.
data "aws_ami" "ubuntu" {
most_recent = true
# ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*
#ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*
filter {
name = "name"
# values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"]
values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"] # Canonical
}
# Demostack VPC with an internet gateway and one public subnet per
# entry in var.cidr_blocks, spread across availability zones.
resource "aws_vpc" "demostack" {
cidr_block = var.vpc_cidr_block
enable_dns_hostnames = true
tags = local.common_tags
}
resource "aws_internet_gateway" "demostack" {
vpc_id = aws_vpc.demostack.id
tags = local.common_tags
}
# Default route to the internet via the IGW on the VPC's main table.
resource "aws_route" "internet_access" {
route_table_id = aws_vpc.demostack.main_route_table_id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.demostack.id
}
data "aws_availability_zones" "available" {}
resource "aws_subnet" "demostack" {
count = length(var.cidr_blocks)
vpc_id = aws_vpc.demostack.id
availability_zone = data.aws_availability_zones.available.names[count.index]
cidr_block = var.cidr_blocks[count.index]
# Instances get public IPs at launch (public demo subnets).
map_public_ip_on_launch = true
tags = local.common_tags
}
# Shared security group for all demostack servers and workers.
# NOTE(review): most service ports below are open to 0.0.0.0/0 -- this is
# a demo stack; lock these down for anything production-like.
resource "aws_security_group" "demostack" {
name_prefix = var.namespace
vpc_id = aws_vpc.demostack.id
tags = local.common_tags
#Allow internal communication between nodes
ingress {
from_port = -1
to_port = -1
protocol = -1
# Fix: an ingress rule must declare a source (cidr_blocks,
# security_groups, or self); the AWS API rejects a rule without one.
# `self = true` permits all traffic between members of this SG, which
# is what the comment above describes.
self = true
}
# SSH access if host_access_ip has CIDR blocks
dynamic "ingress" {
for_each = var.host_access_ip
content {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [ingress.value]
}
}
#HTTP
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Demostack LDAP
ingress {
from_port = 389
to_port = 389
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Demostack HTTPS
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Oracle listener (comment said "Grafana" but 1521 is Oracle's port)
ingress {
from_port = 1521
to_port = 1521
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Grafana
ingress {
from_port = 3000
to_port = 3000
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Demostack Postgres + pgadmin
ingress {
from_port = 5000
to_port = 5500
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Consul and Vault and Boundary ports
ingress {
from_port = 8000
to_port = 9300
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Fabio Ports
ingress {
from_port = 9998
to_port = 9999
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#Nomad
ingress {
from_port = 3000
to_port = 4999
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
#More nomad ports
ingress {
from_port = 20000
to_port = 29999
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 30000
to_port = 39999
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Unrestricted egress.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# SSH key pair uploaded for all demostack instances.
resource "aws_key_pair" "demostack" {
key_name = var.namespace
public_key = var.public_key
tags = local.common_tags
}
# Instance profile attached to servers so Consul cloud auto-join and
# Vault KMS auto-unseal can use the role below.
resource "aws_iam_instance_profile" "consul-join" {
name = "${var.namespace}-consul-join"
role = aws_iam_role.consul-join.name
}
# KMS key used by Vault for auto-unseal.
resource "aws_kms_key" "demostackVaultKeys" {
description = "KMS for the Consul Demo Vault"
deletion_window_in_days = 10
tags = local.common_tags
}
resource "aws_iam_policy" "consul-join" {
name = "${var.namespace}-consul-join"
description = "Allows Consul nodes to describe instances for joining."
policy = data.aws_iam_policy_document.vault-server.json
}
resource "aws_iam_role" "consul-join" {
name = "${var.namespace}-consul-join"
assume_role_policy = file("${path.module}/templates/policies/assume-role.json")
tags = local.common_tags
}
# NOTE(review): aws_iam_policy_attachment is exclusive -- it detaches the
# policy from anything else that attaches it outside this config;
# aws_iam_role_policy_attachment is usually the safer choice.
resource "aws_iam_policy_attachment" "consul-join" {
name = "${var.namespace}-consul-join"
roles = [aws_iam_role.consul-join.name]
policy_arn = aws_iam_policy.consul-join.arn
}
# IAM policy document granting (1) KMS access for Vault auto-unseal and
# (2) the EC2/logs/SSM-style permissions the demostack nodes use for
# Consul cloud auto-join, metrics, and EBS volume management.
data "aws_iam_policy_document" "vault-server" {
statement {
sid = "VaultKMSUnseal"
effect = "Allow"
actions = [
"kms:Encrypt",
"kms:Decrypt",
"kms:DescribeKey",
]
# Scoped to the single demostack Vault unseal key.
resources = [aws_kms_key.demostackVaultKeys.arn]
}
statement {
effect = "Allow"
# Fix: "ec2:DescribeInstances" was listed twice; deduplicated
# (no change to the effective policy).
actions = [
"ec2:DescribeInstances",
"iam:PassRole",
"iam:ListRoles",
"cloudwatch:PutMetricData",
"ds:DescribeDirectories",
"ec2:DescribeInstanceStatus",
"logs:*",
"ec2messages:*",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:AttachVolume",
"ec2:DetachVolume",
]
resources = ["*"]
}
}

View File

@@ -0,0 +1,40 @@
# Internet-facing network load balancer fronting the Boundary
# controllers on port 9200.
resource "aws_lb" "boundary-controller" {
name = "${var.namespace}-boundary-controller"
load_balancer_type = "network"
internal = false
subnets = aws_subnet.demostack.*.id
tags = local.common_tags
}
resource "aws_lb_target_group" "boundary-controller" {
name = "${var.namespace}-boundary-controller"
port = 9200
protocol = "TCP"
vpc_id = aws_vpc.demostack.id
stickiness {
enabled = false
# Fix: "lb_cookie" is only valid for ALB target groups; network (TCP)
# target groups must use "source_ip", otherwise the provider/API
# rejects the configuration. Stickiness stays disabled either way.
type = "source_ip"
}
tags = local.common_tags
}
# Register every server instance as a controller target.
resource "aws_lb_target_group_attachment" "boundary-controller" {
count = var.servers
target_group_arn = aws_lb_target_group.boundary-controller.arn
target_id = aws_instance.servers[count.index].id
port = 9200
}
resource "aws_lb_listener" "boundary-controller" {
load_balancer_arn = aws_lb.boundary-controller.arn
port = "9200"
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.boundary-controller.arn
}
}

View File

@@ -0,0 +1,51 @@
# ALB fronting the Consul UI/API on 8500; TLS terminates at the ALB
# using the ACM cert validated elsewhere in this module.
resource "aws_alb" "consul" {
name = "${var.namespace}-consul"
security_groups = [aws_security_group.demostack.id]
subnets = aws_subnet.demostack.*.id
tags = local.common_tags
}
resource "aws_alb_target_group" "consul" {
name = "${var.namespace}-consul"
port = "8500"
vpc_id = aws_vpc.demostack.id
protocol = "HTTP"
health_check {
interval = "5"
timeout = "2"
path = "/v1/status/leader"
port = "8500"
protocol = "HTTP"
# 429 is accepted: Consul returns it when rate-limited but healthy.
matcher = "200,429"
healthy_threshold = 2
}
}
resource "aws_alb_listener" "consul" {
depends_on = [
aws_acm_certificate_validation.cert
]
load_balancer_arn = aws_alb.consul.arn
port = "8500"
protocol = "HTTPS"
certificate_arn = aws_acm_certificate_validation.cert.certificate_arn
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
default_action {
target_group_arn = aws_alb_target_group.consul.arn
type = "forward"
}
}
# All server instances serve the Consul API.
resource "aws_alb_target_group_attachment" "consul" {
count = var.servers
target_group_arn = aws_alb_target_group.consul.arn
target_id = element(aws_instance.servers.*.id, count.index)
port = "8500"
}

View File

@@ -0,0 +1,85 @@
# DNS records: one CNAME per fronting load balancer, plus one per
# server/worker instance pointing at its public DNS name.
resource "aws_route53_record" "boundary" {
zone_id = var.zone_id
name = "boundary.${var.namespace}"
#name = "traefik"
type = "CNAME"
records = [aws_lb.boundary-controller.dns_name]
ttl = "300"
}
resource "aws_route53_record" "traefik" {
zone_id = var.zone_id
name = "traefik.${var.namespace}"
#name = "traefik"
type = "CNAME"
records = [aws_alb.traefik.dns_name]
ttl = "300"
}
resource "aws_route53_record" "fabio" {
zone_id = var.zone_id
name = "fabio.${var.namespace}"
#name = "fabio"
type = "CNAME"
records = [aws_alb.fabio.dns_name]
ttl = "300"
}
resource "aws_route53_record" "consul" {
zone_id = var.zone_id
name = "consul.${var.namespace}"
#name = "consul"
type = "CNAME"
records = [aws_alb.consul.dns_name]
ttl = "300"
}
resource "aws_route53_record" "nomad" {
zone_id = var.zone_id
name = "nomad.${var.namespace}"
// name = "nomad"
type = "CNAME"
records = [aws_alb.nomad.dns_name]
ttl = "300"
}
resource "aws_route53_record" "vault" {
zone_id = var.zone_id
name = "vault.${var.namespace}"
// name = "vault"
type = "CNAME"
records = [aws_alb.vault.dns_name]
ttl = "300"
}
# Per-instance records: server-N.<namespace> / workers-N.<namespace>.
resource "aws_route53_record" "servers" {
count = var.servers
zone_id = var.zone_id
name = "server-${count.index}.${var.namespace}"
// name = "server-${count.index}"
type = "CNAME"
records = [element(aws_instance.servers.*.public_dns, count.index)]
ttl = "300"
}
resource "aws_route53_record" "workers" {
count = var.workers
zone_id = var.zone_id
name = "workers-${count.index}.${var.namespace}"
// name = "workers-${count.index}"
type = "CNAME"
records = [element(aws_instance.workers.*.public_dns, count.index)]
ttl = "300"
}

View File

@@ -0,0 +1,25 @@
# Persistent 40 GB EBS volumes for stateful Nomad workloads.
# NOTE(review): all four are pinned to the first AZ, so only instances
# launched in AZ[0] can attach them -- confirm that is intentional.
resource "aws_ebs_volume" "mysql" {
availability_zone = data.aws_availability_zones.available.names[0]
size = 40
tags = local.common_tags
}
resource "aws_ebs_volume" "mongodb" {
availability_zone = data.aws_availability_zones.available.names[0]
size = 40
tags = local.common_tags
}
resource "aws_ebs_volume" "prometheus" {
availability_zone = data.aws_availability_zones.available.names[0]
size = 40
tags = local.common_tags
}
resource "aws_ebs_volume" "shared" {
availability_zone = data.aws_availability_zones.available.names[0]
size = 40
tags = local.common_tags
}

View File

@@ -0,0 +1,106 @@
/*
resource "aws_iam_role" "eks" {
name = "${var.namespace}-eks"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks.name
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.eks.name
}
resource "aws_eks_cluster" "eks" {
name = "${var.namespace}-eks"
role_arn = aws_iam_role.eks.arn
vpc_config {
security_group_ids = [aws_security_group.demostack.id]
subnet_ids = aws_subnet.demostack.*.id
}
depends_on = [
aws_iam_role_policy_attachment.eks-AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.eks-AmazonEKSServicePolicy,
]
}
# EKS Worker Nodes Resources
# * IAM role allowing Kubernetes actions to access other AWS services
# * EKS Node Group to launch worker nodes
#
resource "aws_iam_role" "eks-node" {
name = "${var.namespace}-eks-nodes"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.eks-node.name
}
resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.eks-node.name
}
resource "aws_iam_role_policy_attachment" "eks-node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.eks-node.name
}
resource "aws_eks_node_group" "eks-node" {
cluster_name = aws_eks_cluster.eks.name
node_group_name = "demostack"
node_role_arn = aws_iam_role.eks-node.arn
subnet_ids = aws_subnet.demostack.*.id
instance_types = [var.instance_type_worker]
scaling_config {
desired_size = 2
max_size = 2
min_size = 2
}
depends_on = [
aws_iam_role_policy_attachment.eks-node-AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.eks-node-AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.eks-node-AmazonEC2ContainerRegistryReadOnly,
]
}
*/

View File

@@ -0,0 +1,83 @@
# ALB for Fabio: port 9999 routes traffic, 9998 serves the UI. Both
# target groups health-check against Fabio's /health on 9998.
resource "aws_alb" "fabio" {
name = "${var.namespace}-fabio"
security_groups = [aws_security_group.demostack.id]
subnets = aws_subnet.demostack.*.id
tags = local.common_tags
}
resource "aws_alb_target_group" "fabio" {
name = "${var.namespace}-fabio"
port = "9999"
vpc_id = aws_vpc.demostack.id
protocol = "HTTP"
tags = local.common_tags
health_check {
interval = "5"
timeout = "2"
path = "/health"
port = "9998"
protocol = "HTTP"
healthy_threshold = 2
matcher = 200
}
}
resource "aws_alb_target_group" "fabio-ui" {
name = "${var.namespace}-fabio-ui"
port = "9998"
vpc_id = aws_vpc.demostack.id
protocol = "HTTP"
tags = local.common_tags
health_check {
interval = "5"
timeout = "2"
path = "/health"
port = "9998"
protocol = "HTTP"
healthy_threshold = 2
matcher = 200
}
}
resource "aws_alb_listener" "fabio" {
load_balancer_arn = aws_alb.fabio.arn
port = "9999"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.fabio.arn
type = "forward"
}
}
resource "aws_alb_listener" "fabio-ui" {
load_balancer_arn = aws_alb.fabio.arn
port = "9998"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.fabio-ui.arn
type = "forward"
}
}
# Fabio runs on the workers, so attachments target worker instances.
resource "aws_alb_target_group_attachment" "fabio" {
count = var.workers
target_group_arn = aws_alb_target_group.fabio.arn
target_id = element(aws_instance.workers.*.id, count.index)
port = "9999"
}
resource "aws_alb_target_group_attachment" "fabio-ui" {
count = var.workers
target_group_arn = aws_alb_target_group.fabio-ui.arn
target_id = element(aws_instance.workers.*.id, count.index)
port = "9998"
}

View File

@@ -0,0 +1,52 @@
# ALB fronting the Nomad UI/API on 4646. Backends speak HTTPS (Nomad is
# TLS-enabled), so both the target group and health check use HTTPS.
resource "aws_alb" "nomad" {
name = "${var.namespace}-nomad"
security_groups = [aws_security_group.demostack.id]
subnets = aws_subnet.demostack.*.id
tags = local.common_tags
}
resource "aws_alb_target_group" "nomad" {
name = "${var.namespace}-nomad"
port = "4646"
vpc_id = aws_vpc.demostack.id
protocol = "HTTPS"
health_check {
interval = "5"
timeout = "2"
path = "/v1/agent/health"
port = "4646"
protocol = "HTTPS"
# 429 accepted: agent can rate-limit while still healthy.
matcher = "200,429"
healthy_threshold = 2
}
}
resource "aws_alb_listener" "nomad" {
depends_on = [
aws_acm_certificate_validation.cert
]
load_balancer_arn = aws_alb.nomad.arn
port = "4646"
protocol = "HTTPS"
certificate_arn = aws_acm_certificate_validation.cert.certificate_arn
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
default_action {
target_group_arn = aws_alb_target_group.nomad.arn
type = "forward"
}
}
# Nomad servers back the target group.
resource "aws_alb_target_group_attachment" "nomad" {
count = var.servers
target_group_arn = aws_alb_target_group.nomad.arn
target_id = element(aws_instance.servers.*.id, count.index)
port = "4646"
}

View File

@@ -0,0 +1,61 @@
////////////////////// Module //////////////////////////
# Module outputs: instance FQDNs, network ids, and convenience URLs for
# each stack UI (scheme/port matches each service's listener config).
output "servers" {
value = aws_route53_record.servers.*.fqdn
}
output "workers" {
value = aws_route53_record.workers.*.fqdn
}
output "vpc_id" {
value = aws_vpc.demostack.id
}
output "subnet_ids" {
value = aws_subnet.demostack.*.id
}
output "traefik_lb" {
value = "http://${aws_route53_record.traefik.fqdn}:8080"
}
output "fabio_lb" {
value = "http://${aws_route53_record.fabio.fqdn}:9999"
}
output "vault_ui" {
value = "https://${aws_route53_record.vault.fqdn}:8200"
}
output "nomad_ui" {
value = "https://${aws_route53_record.nomad.fqdn}:4646"
}
output "consul_ui" {
value = "https://${aws_route53_record.consul.fqdn}:8500"
}
output "boundary_ui" {
value = "http://${aws_route53_record.boundary.fqdn}:9200"
# value = "troubleshooting"
}
# Node names as rendered into the cloud-init templates.
output "nomad_tag_workers" {
value = data.template_file.workers.*.vars.node_name
}
output "nomad_tag_servers" {
value = data.template_file.servers.*.vars.node_name
}
/*
output "eks_endpoint" {
value = aws_eks_cluster.eks.endpoint
}
output "eks_ca" {
// value = aws_eks_cluster.eks.endpoint
value = aws_eks_cluster.eks.certificate_authority.0.data
}
*/

View File

@@ -0,0 +1,107 @@
# Per-server bootstrap script: shared base/docker setup followed by the
# Consul, Vault, and Nomad server install scripts, concatenated in order
# and parameterized via the vars map below.
data "template_file" "servers" {
count = var.servers
template = join("\n", tolist([
file("${path.module}/templates/shared/base.sh"),
file("${path.module}/templates/shared/docker.sh"),
file("${path.module}/templates/server/consul.sh"),
file("${path.module}/templates/server/vault.sh"),
file("${path.module}/templates/server/nomad.sh"),
]))
vars = {
region = var.region
enterprise = var.enterprise
vaultlicense = var.vaultlicense
consullicense = var.consullicense
kmskey = aws_kms_key.demostackVaultKeys.id
namespace = var.namespace
node_name = "${var.namespace}-server-${count.index}"
# Per-node TLS material signed by the shared CA.
# me_ca = tls_self_signed_cert.root.cert_pem
me_ca = var.ca_cert_pem
me_cert = element(tls_locally_signed_cert.server.*.cert_pem, count.index)
me_key = element(tls_private_key.server.*.private_key_pem, count.index)
public_key = var.public_key
# Consul
primary_datacenter = var.primary_datacenter
consul_url = var.consul_url
consul_ent_url = var.consul_ent_url
consul_gossip_key = var.consul_gossip_key
consul_join_tag_key = "ConsulJoin"
consul_join_tag_value = var.consul_join_tag_value
consul_master_token = var.consul_master_token
consul_servers = var.servers
meta_zone_tag = "${var.namespace}-${count.index}"
# Nomad
nomad_url = var.nomad_url
nomad_ent_url = var.nomad_ent_url
nomad_gossip_key = var.nomad_gossip_key
nomad_servers = var.servers
cni_plugin_url = var.cni_plugin_url
nomadlicense = var.nomadlicense
# Nomad jobs
fabio_url = var.fabio_url
# Vault
vault_oracle_client_url = var.vault_oracle_client_url
vault_url = var.vault_url
vault_ent_url = var.vault_ent_url
vault_root_token = random_id.vault-root-token.hex
vault_servers = var.servers
vault_api_addr = "https://${aws_route53_record.vault.fqdn}:8200"
}
}
# Gzip cloud-init config
# Gzip cloud-init config
# Gzip + base64 keeps the rendered script under the EC2 user-data limit.
data "template_cloudinit_config" "servers" {
count = var.servers
gzip = true
base64_encode = true
part {
content_type = "text/x-shellscript"
content = element(data.template_file.servers.*.rendered, count.index)
}
}
# Server fleet: one instance per var.servers, spread round-robin across
# the demostack subnets, bootstrapped by the cloud-init config above.
resource "aws_instance" "servers" {
count = var.servers
ami = data.aws_ami.ubuntu.id
instance_type = var.instance_type_server
key_name = aws_key_pair.demostack.id
subnet_id = element(aws_subnet.demostack.*.id, count.index)
# Grants Consul auto-join + Vault KMS-unseal permissions.
iam_instance_profile = aws_iam_instance_profile.consul-join.name
vpc_security_group_ids = [aws_security_group.demostack.id]
root_block_device {
volume_size = "240"
delete_on_termination = "true"
}
# Secondary data volume (mounted by the bootstrap scripts).
ebs_block_device {
device_name = "/dev/xvdd"
volume_type = "gp2"
volume_size = "240"
delete_on_termination = "true"
}
# ConsulJoin tag is what cloud auto-join matches on.
tags = merge(local.common_tags ,{
ConsulJoin = "${var.consul_join_tag_value}" ,
Purpose = "demostack" ,
function = "server" ,
name = "demostack-server-${count.index}" ,
}
)
user_data = element(data.template_cloudinit_config.servers.*.rendered, count.index)
}

View File

@@ -0,0 +1,196 @@
# Server private key
# One ECDSA P-521 key per server node; injected into cloud-init as the node TLS key.
resource "tls_private_key" "server" {
  count       = var.servers
  algorithm   = "ECDSA"
  ecdsa_curve = "P521"
}

# Server signing request
resource "tls_cert_request" "server" {
  count           = var.servers
  key_algorithm   = element(tls_private_key.server.*.algorithm, count.index)
  private_key_pem = element(tls_private_key.server.*.private_key_pem, count.index)

  subject {
    common_name  = "${var.namespace}-server-${count.index}.node.consul"
    organization = "HashiCorp Demostack"
  }

  # SANs cover every DNS name Consul, Nomad, and Vault advertise for server
  # nodes so a single certificate serves all three products.
  dns_names = [
    # Consul
    "${var.namespace}-server-${count.index}.node.consul",
    "${var.namespace}-server-${count.index}.node.${var.region}.consul",
    "*.service.consul",
    "*.service.${var.region}.consul",
    "*.query.consul",
    "consul.service.consul",

    # Nomad
    "nomad.service.consul",
    "nomad.service.${var.region}.consul",
    "client.global.nomad",
    "server.global.nomad",

    # Vault
    "vault.service.consul",
    "vault.query.consul",
    "active.vault.service.consul",
    "active.vault.service.${var.region}.consul",
    "standby.vault.service.consul",
    "standby.vault.service.${var.region}.consul",
    "performance-standby.vault.service.consul",
    "performance-standby.vault.service.${var.region}.consul",

    # Common
    "localhost",
    "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}",
    # NOTE(review): the three eu-guystack names below look specific to one
    # person's environment — confirm they are still needed.
    "server-0.eu-guystack.original.aws.hashidemos.io",
    "server-1.eu-guystack.original.aws.hashidemos.io",
    "server-2.eu-guystack.original.aws.hashidemos.io",
  ]

  // ip_addresses = ["${aws_eip.server_ips.*.public_ip }"]
}

# Server certificate
# Signed locally with the CA material supplied via variables; 30-day validity.
resource "tls_locally_signed_cert" "server" {
  count                 = var.servers
  cert_request_pem      = element(tls_cert_request.server.*.cert_request_pem, count.index)
  ca_key_algorithm      = var.ca_key_algorithm
  ca_private_key_pem    = var.ca_private_key_pem
  ca_cert_pem           = var.ca_cert_pem
  validity_period_hours = 720 # 30 days

  allowed_uses = [
    "client_auth",
    "digital_signature",
    "key_agreement",
    "key_encipherment",
    "server_auth",
  ]
}

# Vault initial root token
# Random namespace-prefixed token, passed into the server/worker templates.
resource "random_id" "vault-root-token" {
  byte_length = 8
  prefix      = "${var.namespace}-"
}
# Client private key
# One ECDSA P-521 key per worker node, mirroring the server key setup.
resource "tls_private_key" "workers" {
  count       = var.workers
  algorithm   = "ECDSA"
  ecdsa_curve = "P521"
}

# Client signing request
resource "tls_cert_request" "workers" {
  count           = var.workers
  key_algorithm   = element(tls_private_key.workers.*.algorithm, count.index)
  private_key_pem = element(tls_private_key.workers.*.private_key_pem, count.index)

  subject {
    common_name  = "${var.namespace}-worker-${count.index}.node.consul"
    organization = "HashiCorp Demostack"
  }

  # Same SAN set as the server certs, minus the environment-specific names.
  dns_names = [
    # Consul
    "${var.namespace}-worker-${count.index}.node.consul",
    "${var.namespace}-worker-${count.index}.node.${var.region}.consul",
    "*.service.consul",
    "*.service.${var.region}.consul",
    "*.query.consul",
    "consul.service.consul",

    # Nomad
    "nomad.service.consul",
    "nomad.service.${var.region}.consul",
    "client.global.nomad",
    "server.global.nomad",

    # Vault
    "vault.service.consul",
    "vault.query.consul",
    "active.vault.service.consul",
    "active.vault.service.${var.region}.consul",
    "standby.vault.service.consul",
    "standby.vault.service.${var.region}.consul",
    "performance-standby.vault.service.consul",
    "performance-standby.vault.service.${var.region}.consul",

    # Common
    "localhost",
    "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}",
  ]

  /*
  ip_addresses = [
    "127.0.0.1",
  ]
  */
  // ip_addresses = ["${aws_eip.server_ips.*.public_ip }"]
}

# Client certificate
# Signed by the same locally-held CA as the server certs; 30-day validity.
resource "tls_locally_signed_cert" "workers" {
  count                 = var.workers
  cert_request_pem      = element(tls_cert_request.workers.*.cert_request_pem, count.index)
  ca_key_algorithm      = var.ca_key_algorithm
  ca_private_key_pem    = var.ca_private_key_pem
  ca_cert_pem           = var.ca_cert_pem
  validity_period_hours = 720 # 30 days

  allowed_uses = [
    "client_auth",
    "digital_signature",
    "key_agreement",
    "key_encipherment",
    "server_auth",
  ]
}
// ALB certs
# Wildcard ACM certificate for the namespace's public zone, DNS-validated.
resource "aws_acm_certificate" "cert" {
  domain_name       = "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}"
  validation_method = "DNS"
  tags              = local.common_tags

  lifecycle {
    create_before_destroy = true
  }
}

# Route53 record that proves domain ownership to ACM.
resource "aws_route53_record" "validation_record" {
  # NOTE(review): indexing domain_validation_options with .0 is deprecated in
  # AWS provider v3+ (it became a set) — consider a for_each over it.
  name            = aws_acm_certificate.cert.domain_validation_options.0.resource_record_name
  type            = aws_acm_certificate.cert.domain_validation_options.0.resource_record_type
  zone_id         = var.zone_id
  records         = [aws_acm_certificate.cert.domain_validation_options.0.resource_record_value]
  ttl             = "60"
  allow_overwrite = true

  lifecycle {
    create_before_destroy = true
  }
}

# Blocks until DNS validation completes; HTTPS listeners depend on this.
resource "aws_acm_certificate_validation" "cert" {
  certificate_arn = aws_acm_certificate.cert.arn
  validation_record_fqdns = [
    aws_route53_record.validation_record.fqdn,
  ]
}

View File

@@ -0,0 +1,80 @@
# Shared ALB fronting the Traefik proxy (8080) and its dashboard (8081) on the
# worker nodes.
resource "aws_alb" "traefik" {
  name            = "${var.namespace}-traefik"
  security_groups = [aws_security_group.demostack.id]
  subnets         = aws_subnet.demostack.*.id
  tags            = local.common_tags
}

resource "aws_alb_target_group" "traefik" {
  name     = "${var.namespace}-traefik"
  port     = "8080"
  vpc_id   = aws_vpc.demostack.id
  protocol = "HTTP"

  health_check {
    interval          = "5"
    timeout           = "2"
    path              = "/ping"
    port              = "8080"
    protocol          = "HTTP"
    healthy_threshold = 2
    matcher           = 200
  }
}

resource "aws_alb_target_group" "traefik-ui" {
  name     = "${var.namespace}-traefik-ui"
  port     = "8081"
  vpc_id   = aws_vpc.demostack.id
  protocol = "HTTP"

  health_check {
    interval = "5"
    timeout  = "2"
    path     = "/ping"
    # NOTE(review): traffic goes to 8081 but health is probed on 8080 —
    # presumably /ping only answers on the proxy port; confirm.
    port              = "8080"
    protocol          = "HTTP"
    healthy_threshold = 2
    matcher           = 200
  }
}

# Plain HTTP listeners forwarding to the matching target group.
resource "aws_alb_listener" "traefik" {
  load_balancer_arn = aws_alb.traefik.arn
  port              = "8080"
  protocol          = "HTTP"

  default_action {
    target_group_arn = aws_alb_target_group.traefik.arn
    type             = "forward"
  }
}

resource "aws_alb_listener" "traefik-ui" {
  load_balancer_arn = aws_alb.traefik.arn
  port              = "8081"
  protocol          = "HTTP"

  default_action {
    target_group_arn = aws_alb_target_group.traefik-ui.arn
    type             = "forward"
  }
}

# Register every worker instance with both target groups.
resource "aws_alb_target_group_attachment" "traefik" {
  count            = var.workers
  target_group_arn = aws_alb_target_group.traefik.arn
  target_id        = element(aws_instance.workers.*.id, count.index)
  port             = "8080"
}

resource "aws_alb_target_group_attachment" "traefik-ui" {
  count            = var.workers
  target_group_arn = aws_alb_target_group.traefik-ui.arn
  target_id        = element(aws_instance.workers.*.id, count.index)
  port             = "8081"
}

View File

@@ -0,0 +1,197 @@
locals {
  # Common tags to be assigned to all resources
  # (ownership / reaper metadata consumed by internal tooling).
  common_tags = {
    name           = var.namespace
    owner          = var.owner
    created-by     = var.created-by
    sleep-at-night = var.sleep-at-night
    ttl            = var.TTL
    se-region      = var.region
    terraform      = true
    purpose        = "SE Demostack"
  }
}
# Download URL for the Vault Oracle database secrets-engine plugin.
variable "vault_oracle_client_url" {
  # Fixed: description previously said "The url to download vault." (copy-paste).
  description = "The url to download the Vault Oracle database plugin."
  default     = "https://releases.hashicorp.com/vault-plugin-database-oracle/0.2.1/vault-plugin-database-oracle_0.2.1_linux_amd64.zip"
}

variable "region" {
  description = "The region to create resources."
  default     = "eu-west-2"
}
# Unique prefix for this deployment; also used in DNS names and resource names.
variable "namespace" {
  # Fixed spelling/grammar of the original description.
  description = <<EOH
Differentiates multiple demostack deployments in the same subscription; every cluster should have a different value.
EOH
  default     = "connectdemo"
}
variable "servers" {
  description = "The number of data servers (consul, nomad, etc)."
  default     = "3"
}

variable "workers" {
  description = "The number of nomad worker vms to create."
  default     = "3"
}

variable "consul_url" {
  description = "The url to download Consul."
  default     = "https://releases.hashicorp.com/consul/1.2.3/consul_1.2.3_linux_amd64.zip"
}

variable "consul_ent_url" {
  description = "The url to download Consul."
  default     = "https://s3-us-west-2.amazonaws.com/hc-enterprise-binaries/consul/ent/1.2.3/consul-enterprise_1.2.3%2Bent_linux_amd64.zip"
}

variable "fabio_url" {
  description = "The url download fabio."
  default     = "https://github.com/fabiolb/fabio/releases/download/v1.5.7/fabio-1.5.7-go1.9.2-linux_amd64"
}

variable "nomad_url" {
  description = "The url to download nomad."
  default     = "https://releases.hashicorp.com/nomad/0.8.6/nomad_0.8.6_linux_amd64.zip"
}

# NOTE(review): this "enterprise" URL defaults to the same OSS binary as
# nomad_url — confirm that is intentional.
variable "nomad_ent_url" {
  description = "The url to download nomad."
  default     = "https://releases.hashicorp.com/nomad/0.8.6/nomad_0.8.6_linux_amd64.zip"
}
variable "cni_plugin_url" {
  # Fixed typo: "teh" -> "the".
  description = "The url to download the CNI plugin for nomad."
  default     = "https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz"
}
variable "vault_url" {
  description = "The url to download vault."
  default     = "https://releases.hashicorp.com/vault/0.11.1/vault_0.11.1_linux_amd64.zip"
}

variable "vault_ent_url" {
  description = "The url to download vault."
  default     = "https://s3-us-west-2.amazonaws.com/hc-enterprise-binaries/vault/ent/0.11.1/vault-enterprise_0.11.1%2Bent_linux_amd64.zip"
}

variable "owner" {
  description = "Email address of the user responsible for lifecycle of cloud resources used for training."
}

variable "hashi_region" {
  description = "the region the owner belongs in. e.g. NA-WEST-ENT, EU-CENTRAL"
  default     = "EMEA"
}

variable "created-by" {
  description = "Tag used to identify resources created programmatically by Terraform"
  default     = "Terraform"
}

variable "sleep-at-night" {
  description = "Tag used by reaper to identify resources that can be shutdown at night"
  default     = true
}

variable "TTL" {
  description = "Hours after which resource expires, used by reaper. Do not use any unit. -1 is infinite."
  default     = "240"
}

variable "vpc_cidr_block" {
  description = "The top-level CIDR block for the VPC."
  default     = "10.1.0.0/16"
}

variable "cidr_blocks" {
  description = "The CIDR blocks to create the workstations in."
  default     = ["10.1.1.0/24", "10.1.2.0/24"]
}
variable "zone_id" {
  # Fixed: description was copy-pasted from cidr_blocks; this variable holds
  # the Route53 hosted zone ID used for DNS records (e.g. ACM validation).
  description = "The Route53 hosted zone ID used for DNS records."
  default     = ""
}
variable "public_key" {
  description = "The contents of the SSH public key to use for connecting to the cluster."
}

variable "enterprise" {
  description = "do you want to use the enterprise version of the binaries"
  default     = false
}

variable "vaultlicense" {
  description = "Enterprise License for Vault"
  default     = ""
}

variable "consullicense" {
  description = "Enterprise License for Consul"
  default     = ""
}

variable "nomadlicense" {
  description = "Enterprise License for Nomad"
  default     = ""
}

variable "instance_type_server" {
  description = "The type(size) of data servers (consul, nomad, etc)."
  default     = "r4.large"
}

variable "instance_type_worker" {
  description = "The type(size) of data servers (consul, nomad, etc)."
  default     = "t2.xlarge"
}

# CA material for locally-signed node certificates (passed in by the caller).
variable "ca_key_algorithm" {
  default = ""
}

variable "ca_private_key_pem" {
  default = ""
}

variable "ca_cert_pem" {
  default = ""
}

# Cluster secrets, normally generated by the root module.
variable "consul_gossip_key" {
  default = ""
}

variable "consul_master_token" {
  default = ""
}

variable "consul_join_tag_value" {
  default = ""
}

variable "nomad_gossip_key" {
  default = ""
}

# "1" to have cloud-init submit the demo Nomad jobs, "0" to skip.
variable "run_nomad_jobs" {
  default = "0"
}

variable "host_access_ip" {
  description = "CIDR blocks allowed to connect via SSH on port 22"
  default     = []
}

variable "primary_datacenter" {
  description = "the primary datacenter for mesh gateways"
  default     = ""
}

View File

@@ -0,0 +1,116 @@
# Public ALB for the Vault API (port 8200), TLS-terminated with the ACM cert.
resource "aws_alb" "vault" {
  name            = "${var.namespace}-vault"
  security_groups = [aws_security_group.demostack.id]
  subnets         = aws_subnet.demostack.*.id
  tags            = local.common_tags
}

resource "aws_alb_target_group" "vault" {
  name     = "${var.namespace}-vault"
  tags     = local.common_tags
  port     = "8200"
  vpc_id   = aws_vpc.demostack.id
  protocol = "HTTPS"

  health_check {
    interval = "5"
    timeout  = "2"
    path     = "/v1/sys/health"
    port     = "8200"
    protocol = "HTTPS"
    # Accept standby status codes too so non-active nodes stay in rotation
    # (see Vault's /sys/health status-code documentation).
    matcher           = "200,472,473"
    healthy_threshold = 2
  }
}

resource "aws_alb_listener" "vault" {
  # The ACM certificate must be issued before the HTTPS listener can exist.
  depends_on = [
    aws_acm_certificate_validation.cert
  ]

  load_balancer_arn = aws_alb.vault.arn
  port              = "8200"
  protocol          = "HTTPS"
  certificate_arn   = aws_acm_certificate_validation.cert.certificate_arn
  ssl_policy        = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"

  default_action {
    target_group_arn = aws_alb_target_group.vault.arn
    type             = "forward"
  }
}

# Register every server instance with the API target group.
resource "aws_alb_target_group_attachment" "vault" {
  count            = var.servers
  target_group_arn = aws_alb_target_group.vault.arn
  target_id        = element(aws_instance.servers.*.id, count.index)
  port             = "8200"
}
##########################################################
# Separate ALB for Vault cluster-to-cluster traffic (port 8201).
resource "aws_alb" "vault_cluster" {
  name            = "${var.namespace}-vault-cluster"
  security_groups = [aws_security_group.demostack.id]
  subnets         = aws_subnet.demostack.*.id
  tags            = local.common_tags
}

resource "aws_alb_target_group" "vault_cluster" {
  name     = "${var.namespace}-vault-cluster"
  tags     = local.common_tags
  port     = "8201"
  vpc_id   = aws_vpc.demostack.id
  protocol = "HTTPS"

  health_check {
    interval = "5"
    timeout  = "2"
    path     = "/v1/sys/health"
    # Health is probed on the API port (8200), not the cluster port, since
    # /v1/sys/health is an API endpoint.
    port              = "8200"
    protocol          = "HTTPS"
    matcher           = "200"
    healthy_threshold = 2
  }
}
# Listener for Vault cluster traffic on port 8201.
resource "aws_alb_listener" "vault_cluster" {
  depends_on = [
    aws_acm_certificate_validation.cert
  ]

  # Fixed: this listener previously attached to aws_alb.vault, which left the
  # aws_alb.vault_cluster load balancer above created but unused.
  load_balancer_arn = aws_alb.vault_cluster.arn
  port              = "8201"
  protocol          = "HTTPS"
  certificate_arn   = aws_acm_certificate_validation.cert.certificate_arn
  ssl_policy        = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"

  default_action {
    target_group_arn = aws_alb_target_group.vault_cluster.arn
    type             = "forward"
  }
}
# Register every server instance with the cluster target group on 8201.
resource "aws_alb_target_group_attachment" "vault_cluster" {
  count            = var.servers
  target_group_arn = aws_alb_target_group.vault_cluster.arn
  target_id        = element(aws_instance.servers.*.id, count.index)
  port             = "8201"
}

View File

@@ -0,0 +1,107 @@
# Render the per-worker cloud-init shell payload from the shared templates.
data "template_file" "workers" {
  count = var.workers

  template = join("\n", tolist([
    file("${path.module}/templates/shared/base.sh"),
    file("${path.module}/templates/shared/docker.sh"),
    file("${path.module}/templates/workers/consul.sh"),
    file("${path.module}/templates/workers/vault.sh"),
    file("${path.module}/templates/workers/nomad.sh"),
    file("${path.module}/templates/workers/ebs_volumes.sh"),
  ]))

  vars = {
    namespace  = var.namespace
    region     = var.region
    node_name  = "${var.namespace}-worker-${count.index}"
    enterprise = var.enterprise

    # Per-node TLS material (CA is supplied via variables).
    #me_ca = tls_self_signed_cert.root.cert_pem
    me_ca      = var.ca_cert_pem
    me_cert    = element(tls_locally_signed_cert.workers.*.cert_pem, count.index)
    me_key     = element(tls_private_key.workers.*.private_key_pem, count.index)
    public_key = var.public_key

    # Consul
    consul_url            = var.consul_url
    consul_ent_url        = var.consul_ent_url
    consul_gossip_key     = var.consul_gossip_key
    consul_join_tag_key   = "ConsulJoin"
    consul_join_tag_value = var.consul_join_tag_value
    meta_zone_tag         = "${var.namespace}-${count.index}"

    # Vault
    vault_url        = var.vault_url
    vault_ent_url    = var.vault_ent_url
    vault_root_token = random_id.vault-root-token.hex
    # NOTE(review): set from var.workers here, while the servers template uses
    # var.servers — confirm this is intentional.
    vault_servers = var.workers

    # Nomad
    nomad_url      = var.nomad_url
    nomad_ent_url  = var.nomad_ent_url
    cni_plugin_url = var.cni_plugin_url
    run_nomad_jobs = var.run_nomad_jobs

    # Nomad EBS Volumes
    # ("index"/"count" here are plain template variables, not meta-arguments.)
    index = count.index + 1
    count = var.workers
    dc1   = data.aws_availability_zones.available.names[0]
    dc2   = data.aws_availability_zones.available.names[1]
    dc3   = data.aws_availability_zones.available.names[2]
    # NOTE(review): mysql is mapped to the "shared" volume (no dedicated mysql
    # volume is referenced) — confirm this is intentional.
    aws_ebs_volume_mysql_id      = aws_ebs_volume.shared.id
    aws_ebs_volume_mongodb_id    = aws_ebs_volume.mongodb.id
    aws_ebs_volume_prometheus_id = aws_ebs_volume.prometheus.id
    aws_ebs_volume_shared_id     = aws_ebs_volume.shared.id
  }
}
# Gzip cloud-init config
# Renders one gzipped, base64-encoded cloud-init payload per worker node.
data "template_cloudinit_config" "workers" {
  count         = var.workers
  gzip          = true
  base64_encode = true

  part {
    content_type = "text/x-shellscript"
    content      = element(data.template_file.workers.*.rendered, count.index)
  }
}
# Nomad worker nodes, bootstrapped via the cloud-init payload above.
resource "aws_instance" "workers" {
  count                  = var.workers
  ami                    = data.aws_ami.ubuntu.id
  instance_type          = var.instance_type_worker
  key_name               = aws_key_pair.demostack.id
  subnet_id              = element(aws_subnet.demostack.*.id, count.index) # spread across subnets
  iam_instance_profile   = aws_iam_instance_profile.consul-join.name      # enables EC2 tag-based auto-join
  vpc_security_group_ids = [aws_security_group.demostack.id]

  root_block_device {
    volume_size           = "240"
    delete_on_termination = "true"
  }

  # Secondary data volume.
  ebs_block_device {
    device_name           = "/dev/xvdd"
    volume_type           = "gp2"
    volume_size           = "240"
    delete_on_termination = "true"
  }

  tags = merge(local.common_tags, {
    ConsulJoin = "${var.consul_join_tag_value}", # tag key/value consumed by Consul cloud auto-join
    Purpose    = "demostack",
    function   = "worker"
    name       = "demostack-worker-${count.index}",
  })

  user_data = element(data.template_cloudinit_config.workers.*.rendered, count.index)
}

View File

@@ -0,0 +1,173 @@
////////////////////// Main //////////////////////////
output "A_Welcome_Message" {
value = <<SHELLCOMMANDS
ooooo ooooo oooo o8o .oooooo.
`888' `888' `888 `"' d8P' `Y8b
888 888 .oooo. .oooo.o 888 .oo. oooo 888 .ooooo. oooo d8b oo.ooooo.
888ooooo888 `P )88b d88( "8 888P"Y88b `888 888 d88' `88b `888""8P 888' `88b
888 888 .oP"888 `"Y88b. 888 888 888 888 888 888 888 888 888
888 888 d8( 888 o. )88b 888 888 888 `88b ooo 888 888 888 888 888
o888o o888o `Y888""8o 8""888P' o888o o888o o888o `Y8bood8P' `Y8bod8P' d888b 888bod8P'
888
o888o
|.--------_--_------------_--__--.|
|| /\ |_)|_)| /\ | |(_ |_ ||
;;`,_/``\|__|__|__/``\|_| _)|__ ,:|
((_(-,-----------.-.----------.-.)`)
\__ ) ,' `. \ _/
: : |_________| : :
|-'| ,'-.-.--.-.`. |`-|
|_.| (( (* )(* ))) |._|
| | `.-`-'--`-'.' | |
|-'| | ,-.-.-. | |._|
| | |(|-|-|-|)| | |
:,': |_`-'-'-'_| ;`.;
\ \ ,' `. /._/
\/ `._ /_______________\_,' /
\ / : ___________ : \,'
`.| | | | |,'
`.| | | |
| | HashiCorp | |
SHELLCOMMANDS
}
// Primary
# Convenience outputs surfacing the primary cluster module's UI endpoints and
# node lists.
output "Primary_Region" {
  value = var.primary_region
}

output "Primary_Consul" {
  value = module.primarycluster.consul_ui
}

output "Primary_Nomad" {
  value = module.primarycluster.nomad_ui
}

output "Primary_Vault" {
  value = module.primarycluster.vault_ui
}

output "Primary_Fabio" {
  value = module.primarycluster.fabio_lb
}

output "Primary_Traefik" {
  value = module.primarycluster.traefik_lb
}

output "Primary_Boundary" {
  value = module.primarycluster.boundary_ui
}

output "Primary_servers_nodes" {
  value = module.primarycluster.servers
}

output "Primary_workers_Nodes" {
  value = module.primarycluster.workers
}

output "Primary_nomad_tag_workers" {
  value = module.primarycluster.nomad_tag_workers
}

output "Primary_nomad_tag_servers" {
  value = module.primarycluster.nomad_tag_servers
}
/**
output "Primary_k8s_eks_endpoint"{
value = module.primarycluster.eks_endpoint
}
output "Primary_k8s_eks_ca"{
value = module.primarycluster.eks_ca
}
**/
// Secondary
/**
output "Secondary_Region" {
value = var.secondary_region
}
output "Secondary_Consul" {
value = module.secondarycluster.consul_ui
}
output "Secondary_Nomad" {
value = module.secondarycluster.nomad_ui
}
output "Secondary_Vault" {
value = module.secondarycluster.vault_ui
}
output "Secondary_Fabio" {
value = module.secondarycluster.fabio_lb
}
output "Secondary_Traefik" {
value = module.secondarycluster.traefik_lb
}
output "Secondary_Boundary" {
value = module.secondarycluster.boundary_ui
}
output "Secondary_servers_nodes" {
value = module.secondarycluster.servers
}
output "Secondary_workers_Nodes" {
value = module.secondarycluster.workers
}
output "Secondary_nomad_tag_workers" {
value = module.secondarycluster.nomad_tag_workers
}
output "Secondary_nomad_tag_servers" {
value = module.secondarycluster.nomad_tag_servers
}
*/
// Tertiary
/*
output "Tertiary_Region" {
value = var.tertiary_region
}
output "Tertiary_Consul" {
value = module.tertiarycluster.consul_ui
}
output "Tertiary_Nomad" {
value = module.tertiarycluster.nomad_ui
}
output "Tertiary_Vault" {
value = module.tertiarycluster.vault_ui
}
output "Tertiary_Fabio" {
value = module.tertiarycluster.fabio_lb
}
output "Tertiary_server_nodes" {
value = module.tertiarycluster.servers
}
output "Tertiary_workers_Nodes" {
value = module.tertiarycluster.workers
}
output "Tertiary_nomad_tag_workers"{
value = module.tertiarycluster.nomad_tag_workers
}
output "Tertiary_nomad_tag_servers"{
value = module.tertiarycluster.nomad_tag_servers
}
*/

View File

@@ -0,0 +1,238 @@
/*
data "http" "myipaddr" {
  url = "http://ipv4.icanhazip.com"
}
locals {
  host_access_ip = ["${chomp(data.http.myipaddr.body)}/32"]
}
*/

# CIDR blocks permitted to reach the cluster over SSH (empty = none added).
variable "host_access_ip" {
  description = "your IP address to allow ssh to work"
  default     = []
}
# Toggles controlling which clusters this root module deploys.
# Fixed: defaults were the strings "true"/"false" despite `type = bool`
# (Terraform coerced them, but bare booleans match the declared type), and all
# three descriptions were copy-pasted from an unrelated delegated-zone variable.
variable "create_primary_cluster" {
  description = "Set to true if you want to deploy the primary cluster."
  type        = bool
  default     = true
}

variable "create_secondary_cluster" {
  description = "Set to true if you want to deploy the secondary cluster."
  type        = bool
  default     = false
}

variable "create_tertiary_cluster" {
  description = "Set to true if you want to deploy the tertiary cluster."
  type        = bool
  default     = false
}
# Per-cluster namespaces; each deployment in a subscription needs a unique one.
# Fixed spelling/grammar of the repeated description.
variable "namespace" {
  description = <<EOH
Differentiates multiple demostack deployments in the same subscription; every cluster should have a different value.
EOH
  default     = "primarystack"
}

variable "primary_namespace" {
  description = <<EOH
Differentiates multiple demostack deployments in the same subscription; every cluster should have a different value.
EOH
  default     = "primarystack"
}

variable "secondary_namespace" {
  description = <<EOH
Differentiates multiple demostack deployments in the same subscription; every cluster should have a different value.
EOH
  default     = "secondarystack"
}

variable "tertiary_namespace" {
  description = <<EOH
Differentiates multiple demostack deployments in the same subscription; every cluster should have a different value.
EOH
  default     = "tertiarystack"
}
# Regions for each optional cluster.
variable "primary_region" {
  description = "The region to create resources."
  default     = "eu-west-2"
}

# NOTE(review): secondary defaults to the same region as primary — confirm.
variable "secondary_region" {
  description = "The region to create resources."
  default     = "eu-west-2"
}

variable "tertiary_region" {
  description = "The region to create resources."
  default     = "ap-northeast-1"
}

variable "servers" {
  description = "The number of data servers (consul, nomad, etc)."
  default     = "3"
}

variable "workers" {
  description = "The number of nomad worker vms to create."
  default     = "3"
}

variable "consul_url" {
  description = "The url to download Consul."
  default     = "https://releases.hashicorp.com/consul/1.2.2/consul_1.2.2_linux_amd64.zip"
}

# NOTE(review): this "enterprise" URL defaults to the same OSS binary as
# consul_url — confirm that is intentional.
variable "consul_ent_url" {
  description = "The url to download Consul."
  default     = "https://releases.hashicorp.com/consul/1.2.2/consul_1.2.2_linux_amd64.zip"
}

variable "fabio_url" {
  description = "The url download fabio."
  default     = "https://github.com/fabiolb/fabio/releases/download/v1.5.7/fabio-1.5.7-go1.9.2-linux_amd64"
}

variable "nomad_url" {
  description = "The url to download nomad."
  default     = "https://releases.hashicorp.com/nomad/0.8.4/nomad_0.8.4_linux_amd64.zip"
}

variable "nomad_ent_url" {
  description = "The url to download nomad."
  default     = "https://releases.hashicorp.com/nomad/0.8.4/nomad_0.8.4_linux_amd64.zip"
}
variable "cni_plugin_url" {
  # Fixed typo: "teh" -> "the".
  description = "The url to download the CNI plugin for nomad."
  default     = "https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz"
}
variable "vault_url" {
  description = "The url to download vault."
  default     = "https://releases.hashicorp.com/vault/0.11.1/vault_0.11.1_linux_amd64.zip"
}

variable "vault_ent_url" {
  description = "The url to download vault."
  default     = "https://s3-us-west-2.amazonaws.com/hc-enterprise-binaries/vault/ent/0.11.1/vault-enterprise_0.11.1%2Bent_linux_amd64.zip"
}

variable "owner" {
  description = "IAM user responsible for lifecycle of cloud resources used for training"
}

variable "created-by" {
  description = "Tag used to identify resources created programmatically by Terraform"
  default     = "Terraform"
}

variable "sleep-at-night" {
  description = "Tag used by reaper to identify resources that can be shutdown at night"
  default     = true
}

variable "TTL" {
  description = "Hours after which resource expires, used by reaper. Do not use any unit. -1 is infinite."
  default     = "240"
}

variable "vpc_cidr_block" {
  description = "The top-level CIDR block for the VPC."
  default     = "10.1.0.0/16"
}

variable "cidr_blocks" {
  description = "The CIDR blocks to create the workstations in."
  default     = ["10.1.1.0/24", "10.1.2.0/24"]
}
variable "zone_id" {
  # Fixed: description was copy-pasted from cidr_blocks; this variable holds
  # the Route53 hosted zone ID used for DNS records (e.g. ACM validation).
  description = "The Route53 hosted zone ID used for DNS records."
  default     = ""
}
variable "public_key" {
  description = "The contents of the SSH public key to use for connecting to the cluster."
}

variable "enterprise" {
  description = "do you want to use the enterprise version of the binaries"
  default     = false
}

variable "vaultlicense" {
  description = "Enterprise License for Vault"
  default     = ""
}

variable "consullicense" {
  description = "Enterprise License for Consul"
  default     = ""
}

variable "nomadlicense" {
  description = "Enterprise License for Nomad"
  default     = ""
}

variable "instance_type_server" {
  description = "The type(size) of data servers (consul, nomad, etc)."
  default     = "r4.large"
}

variable "instance_type_worker" {
  description = "The type(size) of data servers (consul, nomad, etc)."
  default     = "t2.medium"
}

# CA material for locally-signed node certificates (passed in by the caller).
variable "ca_key_algorithm" {
  default = ""
}

variable "ca_private_key_pem" {
  default = ""
}

variable "ca_cert_pem" {
  default = ""
}

# Cluster secrets, normally generated elsewhere and threaded through.
variable "consul_gossip_key" {
  default = ""
}

variable "consul_master_token" {
  default = ""
}

variable "consul_join_tag_value" {
  default = ""
}

variable "nomad_gossip_key" {
  default = ""
}

# "1" to have cloud-init submit the demo Nomad jobs, "0" to skip.
variable "run_nomad_jobs" {
  default = "0"
}

variable "primary_datacenter" {
  description = "the primary datacenter for mesh gateways"
  default     = ""
}

View File

@@ -0,0 +1,4 @@
# Minimum Terraform version this configuration supports.
terraform {
  required_version = ">= 0.12"
}

View File

@@ -0,0 +1,3 @@
# Example data source usage from the provider scaffolding template.
data "scaffolding_data_source" "example" {
  sample_attribute = "foo"
}

View File

@@ -0,0 +1,3 @@
# Example provider configuration from the provider scaffolding template.
provider "scaffolding" {
  # example configuration here
}

View File

@@ -0,0 +1,3 @@
# Example resource usage from the provider scaffolding template.
resource "scaffolding_resource" "example" {
  sample_attribute = "foo"
}

View File

@@ -0,0 +1,56 @@
# Boundary demo configuration: controller + worker in a single process.
disable_mlock = true

controller {
  name        = "docker-controller"
  description = "A controller for a docker demo!"
  database {
    # Postgres connection string injected via environment variable.
    url = "env://BOUNDARY_PG_URL"
  }
}

worker {
  name        = "docker-worker"
  description = "A worker for a docker demo"
  #address = "boundary"
  public_addr = "127.0.0.1"
}

# API, cluster, and proxy listeners all bind the "boundary" address.
# TLS disabled throughout — demo only.
listener "tcp" {
  address     = "boundary"
  purpose     = "api"
  tls_disable = true
}

listener "tcp" {
  address     = "boundary"
  purpose     = "cluster"
  tls_disable = true
}

listener "tcp" {
  address     = "boundary"
  purpose     = "proxy"
  tls_disable = true
}

# NOTE(review): hard-coded AEAD keys are acceptable for a throwaway demo but
# must never be reused outside of it.
kms "aead" {
  purpose   = "root"
  aead_type = "aes-gcm"
  key       = "sP1fnF5Xz85RrXyELHFeZg9Ad2qt4Z4bgNHVGtD6ung="
  key_id    = "global_root"
}

kms "aead" {
  purpose   = "worker-auth"
  aead_type = "aes-gcm"
  key       = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
  key_id    = "global_worker-auth"
}

kms "aead" {
  purpose   = "recovery"
  aead_type = "aes-gcm"
  key       = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
  key_id    = "global_recovery"
}

View File

@@ -0,0 +1,64 @@
# Consul-Terraform-Sync (CTS) configuration.
log_level = "info"

syslog {
  enabled = true
}

# Consul agent CTS watches for service changes.
consul {
  #address = "consul:8500"
  address = "localhost:8500"
}

terraform_provider "consul" {
  #address = "consul:8500"
  address    = "localhost:8500"
  datacenter = "dc1"
}

terraform_provider "boundary" {
  #addr = "http://boundary:9200"
  addr = "http://localhost:9200"
  # Recovery KMS lets the provider act on Boundary without a login token;
  # must match the "recovery" kms block in the Boundary server config.
  recovery_kms_hcl = <<EOT
kms "aead" {
  purpose = "recovery"
  aead_type = "aes-gcm"
  key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
  key_id = "global_recovery"
}
EOT
}
# Terraform driver used by CTS to apply the generated task workspaces.
driver "terraform" {
  log         = true
  persist_log = false
  path        = ""
  working_dir = "./cts/sync-tasks/"

  backend "consul" {
    gzip = true
  }

  required_providers {
    # Fixed: this entry was keyed "consul" while sourcing hashicorp/http, but
    # the task below lists providers = ["http", "boundary"]; the local name
    # here must match the name tasks reference.
    http = {
      source  = "hashicorp/http"
      version = "2.1.0"
    }
    boundary = {
      source  = "hashicorp/boundary"
      version = "1.0.2"
    }
  }
}
# Re-run the ./boundary module whenever the watched services change.
task {
  name      = "boundary"
  source    = "./boundary"
  providers = ["http", "boundary"]
  services  = ["redis", "mysql"]
  buffer_period {
    # Batch rapid service changes before triggering a run.
    min = "5s"
  }
}

View File

@@ -0,0 +1,102 @@
# Resolve Boundary scope/project/host-catalog IDs by name over the raw HTTP
# API (queried on every run).
data "http" "scopes" {
  url = "http://localhost:9200/v1/scopes"
}

data "http" "project" {
  url = "http://localhost:9200/v1/scopes?scope_id=${local.primary_scope_id}"
}

data "http" "host_catalogs" {
  url = "http://localhost:9200/v1/host-catalogs?scope_id=${local.database_project_id}"
}

locals {
  # IDs picked out of the API responses above by display name; indexing [0]
  # assumes exactly one match exists.
  primary_scope_id         = [for i in jsondecode(data.http.scopes.body).items : i.id if i.name == "primary"][0]
  database_project_id      = [for i in jsondecode(data.http.project.body).items : i.id if i.name == "databases"][0]
  database_host_catalog_id = [for i in jsondecode(data.http.host_catalogs.body).items : i.id if i.name == "databases"][0]

  # Create a map of service names to instance IDs to then build
  # a map of service names to instances
  consul_service_ids = transpose({
    for id, s in var.services : id => [s.name]
  })

  # Group service instances by service name
  # consul_services = {
  #   "app" = [
  #     {
  #       "id" = "app-id-01"
  #       "name" = "app"
  #       "node_address" = "192.168.10.10"
  #     }
  #   ]
  # }
  consul_services = {
    for name, ids in local.consul_service_ids :
    name => [for id in ids : var.services[id]]
  }
}

# NOTE(review): debug output — consider removing before wider use.
output "test" {
  value = local.consul_services
}
# One Boundary host per mysql service instance reported by CTS.
resource "boundary_host" "mysql" {
  for_each = {for s in local.consul_services["mysql"] : s.id => s}
  type     = "static"
  # Fixed: was the literal "mysql" for every instance, so multiple mysql
  # service instances would collide on the same host name; use the
  # per-instance key like the redis hosts below.
  name            = each.key
  description     = "Private mysql container"
  address         = each.value.address
  host_catalog_id = local.database_host_catalog_id
}
# Host set collecting every mysql host registered above.
resource "boundary_host_set" "mysql" {
  type            = "static"
  name            = "mysql"
  description     = "Host set for mysql containers"
  host_catalog_id = local.database_host_catalog_id
  host_ids        = [for k, v in boundary_host.mysql : v.id]
}

# TCP target users connect to; sessions capped at 5 minutes, unlimited count.
resource "boundary_target" "mysql" {
  type                     = "tcp"
  name                     = "mysql"
  description              = "MySQL server"
  scope_id                 = local.database_project_id
  session_connection_limit = -1
  session_max_seconds      = 300
  default_port             = 3306
  host_set_ids = [
    boundary_host_set.mysql.id
  ]
}
# One Boundary host per redis service instance reported by CTS.
resource "boundary_host" "redis" {
  for_each        = {for s in local.consul_services["redis"] : s.id => s}
  type            = "static"
  name            = each.key
  description     = "Private redis container"
  address         = each.value.address
  host_catalog_id = local.database_host_catalog_id
}

# Host set collecting every redis host registered above.
resource "boundary_host_set" "redis" {
  type            = "static"
  name            = "redis"
  description     = "Host set for redis containers"
  host_catalog_id = local.database_host_catalog_id
  host_ids        = [for k, v in boundary_host.redis : v.id]
}

# TCP target users connect to; sessions capped at 5 minutes, unlimited count.
resource "boundary_target" "redis" {
  type                     = "tcp"
  name                     = "redis"
  description              = "Redis server"
  scope_id                 = local.database_project_id
  session_connection_limit = -1
  session_max_seconds      = 300
  default_port             = 6379
  host_set_ids = [
    boundary_host_set.redis.id
  ]
}

View File

@@ -0,0 +1,25 @@
# Shape of the service-instance objects Consul-Terraform-Sync injects into
# this module (CTS service definition protocol v0).
variable "services" {
  description = "Consul services monitored by Consul Terraform Sync"
  type = map(
    object({
      id                    = string
      name                  = string
      kind                  = string
      address               = string
      port                  = number
      meta                  = map(string)
      tags                  = list(string)
      namespace             = string
      status                = string
      node                  = string
      node_id               = string
      node_address          = string
      node_datacenter       = string
      node_tagged_addresses = map(string)
      node_meta             = map(string)
      cts_user_defined_meta = map(string)
    })
  )
}

View File

@@ -0,0 +1,36 @@
# This file is generated by Consul Terraform Sync.
#
# The HCL blocks, arguments, variables, and values are derived from the
# operator configuration for Sync. Any manual changes to this file
# may not be preserved and could be overwritten by a subsequent update.
#
# Task: boundary
# Description:
terraform {
required_version = ">= 0.13.0, < 0.15"
required_providers {
boundary = {
source = "hashicorp/boundary"
version = "1.0.2"
}
}
backend "consul" {
address = "localhost:8500"
gzip = true
path = "consul-terraform-sync/terraform"
}
}
provider "boundary" {
addr = var.boundary.addr
recovery_kms_hcl = var.boundary.recovery_kms_hcl
}
provider "http" {
}
module "boundary" {
source = "./boundary"
services = var.services
}

View File

@@ -0,0 +1,50 @@
# This file is generated by Consul Terraform Sync.
#
# The HCL blocks, arguments, variables, and values are derived from the
# operator configuration for Sync. Any manual changes to this file
# may not be preserved and could be overwritten by a subsequent update.
#
# Task: boundary
# Description:
# Service definition protocol v0
variable "services" {
description = "Consul services monitored by Consul Terraform Sync"
type = map(
object({
id = string
name = string
kind = string
address = string
port = number
meta = map(string)
tags = list(string)
namespace = string
status = string
node = string
node_id = string
node_address = string
node_datacenter = string
node_tagged_addresses = map(string)
node_meta = map(string)
cts_user_defined_meta = map(string)
})
)
}
variable "boundary" {
default = null
description = "Configuration object for boundary"
type = object({
addr = string
recovery_kms_hcl = string
})
}
variable "http" {
default = null
description = "Configuration object for http"
type = object({})
}

View File

@@ -0,0 +1,36 @@
# This file is generated by Consul Terraform Sync.
#
# The HCL blocks, arguments, variables, and values are derived from the
# operator configuration for Sync. Any manual changes to this file
# may not be preserved and could be overwritten by a subsequent update.
#
# Task: boundary
# Description:
terraform {
required_version = ">= 0.13.0, < 0.15"
required_providers {
boundary = {
source = "hashicorp/boundary"
version = "1.0.2"
}
}
backend "consul" {
address = "localhost:8500"
gzip = true
path = "consul-terraform-sync/terraform"
}
}
provider "boundary" {
addr = var.boundary.addr
recovery_kms_hcl = var.boundary.recovery_kms_hcl
}
provider "http" {
}
module "boundary" {
source = "./boundary"
services = var.services
}

View File

@@ -0,0 +1,52 @@
# This file is generated by Consul Terraform Sync.
#
# The HCL blocks, arguments, variables, and values are derived from the
# operator configuration for Sync. Any manual changes to this file
# may not be preserved and could be overwritten by a subsequent update.
#
# Task: boundary
# Description:
# Service definition protocol v0
variable "services" {
description = "Consul services monitored by Consul Terraform Sync"
type = map(
object({
id = string
name = string
kind = string
address = string
port = number
meta = map(string)
tags = list(string)
namespace = string
status = string
node = string
node_id = string
node_address = string
node_datacenter = string
node_tagged_addresses = map(string)
node_meta = map(string)
cts_user_defined_meta = map(string)
})
)
}
variable "boundary" {
default = null
description = "Configuration object for boundary"
sensitive = true
type = object({
addr = string
recovery_kms_hcl = string
})
}
variable "http" {
default = null
description = "Configuration object for http"
sensitive = true
type = object({})
}

View File

@@ -0,0 +1,231 @@
terraform {
required_providers {
boundary = {
source = "hashicorp/boundary"
version = "1.0.2"
}
}
backend "consul" {
address = "localhost:8500"
scheme = "http"
path = "terraform/boundary"
}
}
provider "boundary" {
addr = "http://127.0.0.1:9200"
recovery_kms_hcl = <<EOT
kms "aead" {
purpose = "recovery"
aead_type = "aes-gcm"
key = "8fZBjCUfN0TzjEGLQldGY4+iE9AkOvCfjh7+p0GtRBQ="
key_id = "global_recovery"
}
EOT
}
variable "users" {
type = set(string)
default = [
"jim",
"mike",
"todd",
"randy",
"susmitha",
"jeff",
"pete",
"harold",
"patrick",
"jonathan",
"yoko",
"brandon",
"kyle",
"justin",
"melissa",
"paul",
"mitchell",
"armon",
"andy",
"ben",
"kristopher",
"kris",
"chris",
"swarna",
]
}
resource "boundary_scope" "global" {
global_scope = true
name = "global"
scope_id = "global"
}
resource "boundary_scope" "org" {
scope_id = boundary_scope.global.id
name = "primary"
description = "Primary organization scope"
}
resource "boundary_scope" "project" {
name = "databases"
description = "Databases project"
scope_id = boundary_scope.org.id
auto_create_admin_role = true
auto_create_default_role = true
}
resource "boundary_user" "user" {
for_each = var.users
name = each.key
description = "User resource for ${each.key}"
account_ids = [boundary_account.user[each.value].id]
scope_id = boundary_scope.org.id
}
resource "boundary_auth_method" "password" {
name = "org_password_auth"
description = "Password auth method for org"
type = "password"
scope_id = boundary_scope.org.id
}
resource "boundary_account" "user" {
for_each = var.users
name = each.key
description = "User account for ${each.key}"
type = "password"
login_name = lower(each.key)
password = "foofoofoo"
auth_method_id = boundary_auth_method.password.id
}
resource "boundary_role" "global_anon_listing" {
scope_id = boundary_scope.global.id
grant_strings = [
"id=*;type=auth-method;actions=list,authenticate",
"id=*;type=scope;actions=*",
"id={{account.id}};actions=read,change-password",
"id=*;type=host-catalog;actions=*",
"type=host-catalog;actions=list"
]
principal_ids = ["u_anon"]
}
resource "boundary_role" "org_anon_listing" {
scope_id = boundary_scope.org.id
grant_strings = [
"id=*;type=auth-method;actions=list,authenticate",
"id=*;type=scope;actions=*",
"id={{account.id}};actions=read,change-password",
"id=*;type=host-catalog;actions=*",
"type=host-catalog;actions=list"
]
principal_ids = ["u_anon"]
}
resource "boundary_role" "org_admin" {
scope_id = "global"
grant_scope_id = boundary_scope.org.id
grant_strings = ["id=*;type=*;actions=*"]
principal_ids = concat(
[for user in boundary_user.user : user.id],
["u_auth"]
)
}
resource "boundary_role" "proj_admin" {
scope_id = boundary_scope.org.id
grant_scope_id = boundary_scope.project.id
grant_strings = ["id=*;type=*;actions=*"]
principal_ids = concat(
[for user in boundary_user.user : user.id],
["u_auth"]
)
}
resource "boundary_role" "proj_anon_listing" {
scope_id = boundary_scope.org.id
grant_scope_id = boundary_scope.project.id
grant_strings = [
"id=*;type=auth-method;actions=list,authenticate",
"id=*;type=scope;actions=*",
"id={{account.id}};actions=read,change-password",
"id=*;type=host-catalog;actions=*",
"type=host-catalog;actions=list"
]
principal_ids = ["u_anon"]
}
resource "boundary_host_catalog" "databases" {
name = "databases"
description = "Database targets"
type = "static"
scope_id = boundary_scope.project.id
}
resource "boundary_host" "localhost" {
type = "static"
name = "localhost"
description = "Localhost host"
address = "localhost"
host_catalog_id = boundary_host_catalog.databases.id
}
# Target hosts available on localhost: ssh and postgres
# Postgres is exposed to localhost for debugging of the
# Boundary DB from the CLI. Assumes SSHD is running on
# localhost.
resource "boundary_host_set" "local" {
type = "static"
name = "local"
description = "Host set for local servers"
host_catalog_id = boundary_host_catalog.databases.id
host_ids = [boundary_host.localhost.id]
}
resource "boundary_target" "ssh" {
type = "tcp"
name = "ssh"
description = "SSH server"
scope_id = boundary_scope.project.id
session_connection_limit = -1
session_max_seconds = 2
default_port = 22
host_set_ids = [
boundary_host_set.local.id
]
}
resource "boundary_target" "postgres" {
type = "tcp"
name = "postgres"
description = "Postgres server"
scope_id = boundary_scope.project.id
session_connection_limit = -1
session_max_seconds = 2
default_port = 5432
host_set_ids = [
boundary_host_set.local.id
]
}
#resource "boundary_auth_method_oidc" "provider" {
# name = "<oidc provider>"
# description = "OIDC auth method for AAD"
# scope_id = boundary_scope.org.id
# issuer = "<issuer_url>"
# client_id = "<client_id>"
# client_secret = "<client_secret>"
# signing_algorithms = ["RS256"]
# api_url_prefix = "http://localhost:9200"
# is_primary_for_scope = true
# state = "active-public"
# max_age = 0
#}
#resource "boundary_account_oidc" "oidc_user" {
# name = "user1"
# description = "OIDC account for user1"
# auth_method_id = boundary_auth_method_oidc.provider.id
# issuer = "<Issuer_URL>"
# subject = "<Object_Id>"
#}

View File

@@ -0,0 +1,4 @@
Kind = "service-defaults"
Name = "frontend"
Namespace = "apps"
Protocol = "http"

View File

@@ -0,0 +1,17 @@
Kind = "service-resolver"
Name = "frontend"
Namespace = "apps"
DefaultSubset = "v1"
Subsets = {
"v1" = {
Filter = "Service.Tags contains v1"
}
"v2" = {
Filter = "Service.Tags contains v2"
}
}
Failover = {
"*" = {
Datacenters = ["dc2"]
}
}

View File

@@ -0,0 +1,21 @@
Kind = "service-router"
Name = "frontend"
Namespace = "apps"
Routes = [
{
Match = {
HTTP = {
QueryParam = [
{
Name = "x-debug"
Present = "True"
},
]
}
}
Destination = {
Service = "frontend"
ServiceSubset = "v2"
}
}
]

View File

@@ -0,0 +1,15 @@
Kind = "service-splitter"
Name = "frontend"
Namespace = "apps"
Splits = [
{
Weight = 50
Service = "frontend"
ServiceSubset = "v1"
},
{
Weight = 50
Service = "frontend"
ServiceSubset = "v2"
},
]

View File

@@ -0,0 +1,20 @@
Kind = "ingress-gateway"
Name = "ingress-gateway"
# TLS {
# Enabled = true
# }
Listeners = [
{
Port = 8080
Protocol = "http"
Services = [
{
Name = "frontend"
Namespace = "apps"
Hosts = "*"
}
]
}
]

View File

@@ -0,0 +1,138 @@
project = "hashicups-demo-hackathon"
app "frontend" {
path = "./front"
build {
# use docker {}
use "docker-pull" {
image = "hashicorpdemoapp/frontend"
tag = "v0.0.4"
}
}
deploy {
use "exec" {
command = ["kubectl","apply","-f","<TPL>","-n","apps"]
template {
path = join("", ["${path.app}/", jsondecode(file("${path.project}/versions.json"))["frontend"]])
}
}
}
}
app "public-api" {
path = "./public-api"
url {
auto_hostname = false
}
build {
use "docker-pull" {
image = "hashicorpdemoapp/public-api"
tag = "v0.0.4"
}
}
deploy {
use "kubernetes" {
annotations = {
"consul.hashicorp.com/connect-inject" = "true"
"consul.hashicorp.com/connect-service" = "public-api"
# "consul.hashicorp.com/connect-service-upstreams" = "product-api:9090"
"consul.hashicorp.com/connect-service-upstreams" = "product-api:9090,payments:9091"
}
service_port = "8080"
service_account = "public-api"
static_environment = {
PRODUCT_API_URI = "http://localhost:9090"
PAYMENT_API_URI = "http://localhost:9091"
BIND_ADDRESS = ":8080"
}
namespace = "apps"
}
}
release {
use "kubernetes" {
port = "8080"
namespace = "apps"
}
}
}
app "product-api" {
path = "./product-api"
build {
use "docker-pull" {
image = "hashicorpdemoapp/product-api"
tag = "v0.0.15"
}
}
deploy {
use "exec" {
command = ["kubectl","apply","-f","<TPL>","-n","apps"]
template {
path = "${path.app}/product-api.yaml"
}
}
}
}
app "postgres" {
path = "./postgres"
build {
use "docker-pull" {
image = "hashicorpdemoapp/product-api-db"
tag = "v0.0.15"
}
}
deploy {
use "exec" {
command = ["kubectl","apply","-f","<TPL>","-n","apps"]
template {
path = "${path.app}/postgres.yaml"
}
}
}
}
app "payments" {
path = "./payments"
url {
auto_hostname = false
}
build {
use "docker-pull" {
image = "hashicorpdemoapp/payments"
tag = "v0.0.11"
}
}
deploy {
use "kubernetes" {
annotations = {
"consul.hashicorp.com/connect-inject" = "true"
"consul.hashicorp.com/connect-service" = "payments"
# "consul.hashicorp.com/connect-service-port" = "9091"
}
service_port = "8080"
service_account = "payments"
static_environment = {
}
namespace = "apps"
}
}
release {
use "kubernetes" {
port = "9091"
namespace = "apps"
}
}
}

View File

@@ -0,0 +1,113 @@
terraform {
required_version = ">= 0.15" # With 0.14 data sources are not refreshed on destroy and you need to do an apply before a destroy to auth to K8s and Helm
required_providers {
google = {
source = "hashicorp/google"
version = "3.64.0"
}
helm = {
source = "hashicorp/helm"
version = "2.1.2"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.1.0"
}
}
# backend "remote" {
# }
}
# Collect client config for GCP
data "google_client_config" "current" {
}
data "google_service_account" "owner_project" {
account_id = var.service_account
}
module "gke" {
source = "./modules/tf-gke"
# version = "0.1.0"
count = var.create_federation ? 2 : 1
dns_zone = var.dns_zone
gcp_project = var.gcp_project
gcp_region = var.gcp_region[count.index]
gcp_zone = var.gcp_zone[count.index]
gcs_bucket = var.gcs_bucket
gke_cluster = "${var.gke_cluster}${count.index + 1}"
default_gke = var.default_gke
default_network = var.default_network
owner = var.owner
service_account = var.service_account
}
module "k8s" {
source = "./modules/kubernetes"
depends_on = [
module.gke,
# data.google_container_cluster.primary_gke
]
providers = {
helm = helm.primary
kubernetes = kubernetes.primary
}
# token = data.google_client_config.current.access_token
# k8s_host = local.secondary_host
# k8s_cert = local.secondary_cert
cluster_endpoint = module.gke.0.k8s_endpoint
cluster_namespace = "consul"
ca_certificate = module.gke.0.gke_ca_certificate
location = var.gcp_zone[0]
gcp_region = var.gcp_region[0]
gcp_project = var.gcp_project
cluster_name = var.gke_cluster
config_bucket = var.gcs_bucket
nodes = var.consul_nodes
gcp_service_account = data.google_service_account.owner_project
dns_zone = var.dns_zone
consul_license = var.consul_license
values_file = "consul-values-dc.yaml"
consul_dc = "dc1"
enterprise = var.consul_enterprise
consul_version = var.consul_version
# envoy_version = var.envoy_version
chart_version = var.chart_version
}
module "k8s-sec" {
count = var.create_federation ? 1 : 0
source = "./modules/kubernetes"
depends_on = [
# module.gke,
module.k8s,
# data.google_container_cluster.secondary_gke
]
providers = {
helm = helm.secondary
kubernetes = kubernetes.secondary
}
# token = data.google_client_config.current.access_token
# k8s_host = local.secondary_host
# k8s_cert = local.secondary_cert
cluster_endpoint = module.gke.1.k8s_endpoint
cluster_namespace = "consul"
ca_certificate = module.gke.1.gke_ca_certificate
location = var.gcp_zone[1]
gcp_region = var.gcp_region[1]
gcp_project = var.gcp_project
cluster_name = var.gke_cluster
config_bucket = var.gcs_bucket
nodes = var.consul_nodes
gcp_service_account = data.google_service_account.owner_project
dns_zone = var.dns_zone
consul_license = var.consul_license
values_file = "consul-values-dc-fed.yaml"
federated = true
federation_secret = module.k8s.federation_secret
consul_dc = "dc2"
enterprise = var.consul_enterprise
consul_version = var.consul_version
# envoy_version = var.envoy_version
chart_version = var.chart_version
}

View File

@@ -0,0 +1,118 @@
terraform {
required_version = ">= 0.15" # 0.14 version has a problem when refreshing data sources on destroy
required_providers {
helm = {
source = "hashicorp/helm"
version = "2.1.2"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.1.0"
}
}
# backend "remote" {
# }
}
# The Helm provider creates the namespace, but if we want to create it manually would be with following lines
resource "kubernetes_namespace" "consul" {
metadata {
name = var.cluster_namespace
}
}
# Creating dynamically a hostname list to use later on template
# data "null_data_source" "hostnames" {
# count = var.nodes
# inputs = {
# hostnames = "consul-server-${count.index}"
# }
# }
# locals {
# hostnames = data.null_data_source.hostnames.*.inputs.hostnames
# }
# # Let's create a secret with the json credentials
# resource "google_service_account_key" "gcp_sa_key" {
# service_account_id = var.gcp_service_account.name
# }
# resource "kubernetes_secret" "google-application-credentials" {
# metadata {
# name = "gcp-creds"
# namespace = kubernetes_namespace.consul.metadata.0.name
# }
# data = {
# "credentials.json" = base64decode(google_service_account_key.gcp_sa_key.private_key)
# }
# }
resource "kubernetes_secret" "consul-license" {
count = var.consul_license == null ? 0 : 1
metadata {
name = "consul-ent-license"
namespace = kubernetes_namespace.consul.metadata.0.name
}
data = {
"key" = var.consul_license
}
}
resource "kubernetes_secret" "consul-federation" {
count = var.federated ? 1 : 0
metadata {
name = "consul-federation"
namespace = kubernetes_namespace.consul.metadata.0.name
}
data = var.federation_secret
}
# Because we are executing remotely using TFC/TFE we want to save our templates in a Cloud bucket
resource "google_storage_bucket_object" "consul-config" {
name = "${var.cluster_name}-${formatdate("YYMMDD_HHmm",timestamp())}.yml"
content = templatefile("${path.root}/templates/${var.values_file}",{
# version = "1.8.4",
image = var.enterprise ? "hashicorp/consul-enterprise:${var.consul_version}" : "consul:${var.consul_version}"
# envoy = "envoyproxy/envoy-alpine:${var.envoy_version}"
datacenter = var.consul_dc
enterprise = var.enterprise
license = var.consul_license
nodes = var.nodes
# http = var.tls == "enabled" ? "https" : "http",
# disable_tls = var.tls == "enabled" ? false : true,
# tls = var.tls
})
bucket = var.config_bucket
}
## If you want to create the template files locally, uncomment the following lines (this does not work with remote execution in TFE)
# resource "local_file" "foo" {
# content = templatefile("${path.root}/templates/consul_values.yaml",{
# hostname = var.hostname,
# vault_version = var.vault_version
# })
# filename = "${path.root}/templates/vault.yaml"
# }
# We need to create a sleep to let the ingress Load Balancer be assigned, so we can get the Ingress data
# resource "time_sleep" "wait_60_seconds" {
# depends_on = [
# helm_release.consul,
# ]
# create_duration = "60s"
# }
# resource "google_dns_record_set" "consul" {
# count = var.dns_zone != null ? 1 : 0
# name = "${var.hostname}."
# type = "A"
# ttl = 300
# managed_zone = var.dns_zone
# rrdatas = [data.kubernetes_service.consul-ui.load_balancer_ingress.0.ip]
# }

View File

@@ -0,0 +1,64 @@
terraform {
required_version = ">= 0.12.26"
backend "remote" {}
}
# Collect client config for GCP
data "google_client_config" "current" {
}
data "google_service_account" "owner_project" {
account_id = var.service_account
}
resource "google_compute_network" "container_network" {
count = var.default_network ? 0 : 1
name = "${var.gke_cluster}-network"
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "container_subnetwork" {
count = var.default_network ? 0 : 1
name = "${var.gke_cluster}-subnetwork"
description = "auto-created subnetwork for cluster \"${var.gke_cluster}\""
region = var.gcp_region
ip_cidr_range = "10.2.0.0/16"
network = google_compute_network.container_network.0.self_link
}
module "gke" {
source = "./modules/gke"
region = var.gcp_region
zone = var.gcp_zone
project = var.gcp_project
cluster_name = var.gke_cluster
network = var.default_network ? null : google_compute_network.container_network.0.self_link
subnetwork = var.default_network ? null : google_compute_subnetwork.container_subnetwork.0.self_link
nodes = var.numnodes
node_type = var.node_type
owner = var.owner
default_gke = var.default_gke
}
resource "google_storage_bucket_object" "jx-requirements" {
name = "jx-requirements.yml"
content = templatefile("${path.module}/templates/jx-requirements.yml.tpl",{
gke_cluster = var.gke_cluster,
owner = var.owner,
github_org = var.owner,
zone = var.gcp_zone,
project = var.gcp_project
})
bucket = var.gcs_bucket
}
# resource "local_file" "jx-requirements" {
# content = templatefile("${path.module}/templates/jx-requirements.yml.tpl",{
# gke_cluster = var.gke_cluster,
# owner = var.owner,
# github_org = "dcanadillas",
# zone = var.gcp_zone,
# project = var.gcp_project
# })
# filename = "${path.module}/jx-requirements-${var.gke_cluster}.yml"
# }

View File

@@ -0,0 +1,93 @@
# provider "google" {
# project = var.gcp_project
# region = var.gcp_region
# }
# resource "google_compute_network" "vpc_network" {
# name = "${var.cluster_name}-network"
# }
data "google_container_engine_versions" "k8sversion" {
provider = google-beta
project = var.project
location = var.regional_k8s ? var.region : var.zone
version_prefix = "1.16."
}
resource "google_container_cluster" "primary" {
# provider = google-beta
# project = var.project
name = var.cluster_name
location = var.regional_k8s ? var.region : var.zone
# node_version = data.google_container_engine_versions.k8sversion.latest_node_version
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. So we create the smallest possible default
# node pool and immediately delete it.
remove_default_node_pool = var.default_gke ? false : true
initial_node_count = var.default_gke ? var.nodes : 1
# network = google_compute_network.vpc_network.self_link
network = var.network
subnetwork = var.subnetwork
min_master_version = data.google_container_engine_versions.k8sversion.latest_master_version
master_auth {
username = ""
password = ""
client_certificate_config {
issue_client_certificate = false
}
}
node_config {
machine_type = var.node_type
disk_type = "pd-ssd"
metadata = {
disable-legacy-endpoints = "true"
}
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/cloud-platform"
]
tags = [
"${var.owner}-gke"
]
}
}
resource "google_container_node_pool" "primary_nodes" {
count = var.default_gke ? 0 : 1
name = "${var.cluster_name}-node-pool"
location = google_container_cluster.primary.location
#version = data.google_container_engine_versions.k8sversion.latest_node_version
# location = var.regional_k8s == true ? var.region : var.zone
cluster = google_container_cluster.primary.name
node_count = var.nodes
node_config {
machine_type = var.node_type
disk_type = "pd-ssd"
metadata = {
disable-legacy-endpoints = "true"
}
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/cloud-platform"
]
tags = [
"${var.owner}-gke"
]
}
# autoscaling {
# min_node_count = 0
# max_node_count = var.nodes*2
# }
}

View File

@@ -0,0 +1,35 @@
data "google_container_cluster" "gke_cluster" {
depends_on = [
google_container_node_pool.primary_nodes,
]
name = google_container_cluster.primary.name
location = google_container_cluster.primary.location
}
output "ca_certificate" {
value = data.google_container_cluster.gke_cluster.master_auth.0.cluster_ca_certificate
# value = google_container_cluster.primary.master_auth.0.cluster_ca_certificate
}
# output "client_key" {
# depends_on = [
# google_container_node_pool.primary_nodes,
# ]
# value = google_container_cluster.primary.master_auth.0.client_key
# }
# output "client_certificate" {
# depends_on = [
# google_container_node_pool.primary_nodes,
# ]
# value = google_container_cluster.primary.master_auth.0.client_certificate
# }
output "cluster_endpoint" {
value = data.google_container_cluster.gke_cluster.endpoint
# value = data.google_container_cluster.gke_cluster.endpoint
}
output "cluster_name" {
depends_on = [
google_container_node_pool.primary_nodes,
]
# value = google_container_cluster.primary.name
value = data.google_container_cluster.gke_cluster.name
}

View File

@@ -0,0 +1,36 @@
variable "region" {
description = "Cloud region"
}
variable "project" {
description = "Cloud GCP project"
}
variable "node_type" {
description = "Machine type for nodes"
default = "n1-standard-2"
}
variable "zone" {
description = "availability zones"
}
variable "cluster_name" {
description = "Name of your cluster "
}
variable "nodes" {
description = "Number of nodes of the cluster"
}
variable "regional_k8s" {
description = "Set this to true if you want regional cluster with a master per zone"
default = false
}
variable "network" {
description = "The Network to be used for the K8s cluster"
default = "default"
}
variable "subnetwork" {
description = "The Network to be used for the K8s cluster"
}
variable "owner" {
description = "Owner name to tag clusters"
}
variable "default_gke" {
description = "Set it to true if you want to speed up GKE cluster creation by creating a default NodePool"
}

View File

@@ -0,0 +1,14 @@
output "gke_ca_certificate" {
value = base64decode(module.gke.ca_certificate)
}
# output "jx-requirements" {
# value = "https://storage.cloud.google.com/${google_storage_bucket_object.jx-requirements.bucket}/${google_storage_bucket_object.jx-requirements.output_name}"
# # value = google_storage_bucket_object.jx-requirements.self_link
# }
output "k8s_endpoint" {
value = module.gke.cluster_endpoint
}
output "cluster_name" {
value = module.gke.cluster_name
}

View File

@@ -0,0 +1,45 @@
variable "gcp_region" {
description = "Cloud region"
}
variable "gcp_project" {
description = "Cloud GCP project"
}
variable "node_type" {
description = "Machine type for nodes"
default = "n1-standard-2"
}
variable "gcp_zone" {
description = "availability zones"
}
variable "gke_cluster" {
description = "Name of your cluster "
}
variable "numnodes" {
description = "Number of nodes of the cluster"
default=3
}
variable "regional_k8s" {
description = "Set this to true if you want regional cluster with a master per zone"
default = false
}
variable "owner" {
description = "Owner name to tag clusters"
}
variable "gcs_bucket" {
description = "Bucket to save template files created"
}
variable "service_account" {
description = "Service Account to be used as scope permissions"
}
variable "default_gke" {
description = "Set it to true if you want to speed up GKE cluster creation by creating a default NodePool"
default = false
}
variable "default_network" {
description = "Set it to true if you cant to use the default network in GCP"
default = false
}
variable "dns_zone" {
description = "Cloud DNS zone to create record"
default = null
}

View File

@@ -0,0 +1,3 @@
policy "less-than-100-month" {
enforcement_level = "soft-mandatory"
}

View File

@@ -0,0 +1,62 @@
provider "aws" {
region = var.region
}
module "frontend" {
source = "./terraform-aws-server"
name = "frontend"
tags = { Owner = "jdoe@hashicorp.com", CostCenter = "12345", Environment = var.environment }
vpc_id = module.network.vpc_id
vpc_subnet_ids = module.network.public_subnets[0]
security_group_id = module.network.security_group_id
public_key = aws_key_pair.awskey.key_name
private_ip = "10.140.101.10"
upstream_ip = "10.140.101.11"
}
module "public_api" {
source = "./terraform-aws-server"
name = "public_api"
tags = { Owner = "jdoe@hashicorp.com", CostCenter = "12345", Environment = var.environment }
vpc_id = module.network.vpc_id
vpc_subnet_ids = module.network.public_subnets[0]
security_group_id = module.network.security_group_id
public_key = aws_key_pair.awskey.key_name
private_ip = "10.140.101.11"
upstream_ip = "10.140.101.12"
}
module "product_api" {
source = "./terraform-aws-server"
name = "product_api"
tags = { Owner = "jdoe@hashicorp.com", CostCenter = "12345", Environment = var.environment }
vpc_id = module.network.vpc_id
vpc_subnet_ids = module.network.public_subnets[0]
security_group_id = module.network.security_group_id
public_key = aws_key_pair.awskey.key_name
private_ip = "10.140.101.12"
upstream_ip = "10.140.101.13"
}
module "postgres" {
source = "./terraform-aws-server"
name = "postgres"
tags = { Owner = "jdoe@hashicorp.com", CostCenter = "12345", Environment = var.environment }
vpc_id = module.network.vpc_id
vpc_subnet_ids = module.network.public_subnets[0]
security_group_id = module.network.security_group_id
public_key = aws_key_pair.awskey.key_name
private_ip = "10.140.101.13"
upstream_ip = "10.140.101.14"
}
module "network" {
source = "./terraform-aws-network"
name = "Hashicups"
private_subnets = ["10.140.1.0/24", "10.140.2.0/24", "10.140.3.0/24"]
public_subnets = ["10.140.101.0/24", "10.140.102.0/24", "10.140.103.0/24"]
cidr_block = "10.140.0.0/16"
}

View File

@@ -0,0 +1,17 @@
output "output" {
value = <<README
frontend: ssh -i ~/.ssh/awskey.pem ubuntu@${module.frontend.public_ip}
public-api: ssh -i ~/.ssh/awskey.pem ubuntu@${module.public_api.public_ip}
product-api: ssh -i ~/.ssh/awskey.pem ubuntu@${module.product_api.public_ip}
postgres: ssh -i ~/.ssh/awskey.pem ubuntu@${module.postgres.public_ip}
Takes a few mins to install packages:
http://${module.frontend.public_ip}
README
}
output "environment" {
value = var.environment
}

View File

@@ -0,0 +1,22 @@
resource "random_id" "server" {
byte_length = 4
}
resource "aws_key_pair" "awskey" {
key_name = "awskey-${random_id.server.hex}"
public_key = tls_private_key.awskey.public_key_openssh
}
resource "tls_private_key" "awskey" {
algorithm = "RSA"
}
resource "null_resource" "awskey" {
provisioner "local-exec" {
command = "echo \"${tls_private_key.awskey.private_key_pem}\" > ~/.ssh/awskey.pem"
}
provisioner "local-exec" {
command = "chmod 600 ~/.ssh/awskey.pem"
}
}

View File

@@ -0,0 +1,15 @@
output "vpc_id" {
value = module.vpc.vpc_id
}
output "vpc_cidr" {
value = module.vpc.vpc_cidr_block
}
output "public_subnets" {
value = module.vpc.public_subnets
}
output "security_group_id" {
value = aws_security_group.security_group.id
}

View File

@@ -0,0 +1,18 @@
resource "aws_security_group" "security_group" {
name = "${var.name}-security-group"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

View File

@@ -0,0 +1,4 @@
variable "name" { default = "rchao" }
variable "private_subnets" { type = list(string)}
variable "public_subnets" { type = list(string)}
variable "cidr_block" {}

View File

@@ -0,0 +1,13 @@
data "aws_availability_zones" "available" {
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
name = "${var.name}-vpc"
cidr = var.cidr_block
azs = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2]]
private_subnets = var.private_subnets
public_subnets = var.public_subnets
enable_dns_hostnames = true
enable_vpn_gateway = false
}

View File

@@ -0,0 +1,15 @@
output "db_password" {
value = var.db_password
}
output "db_username" {
value = var.db_username
}
output "db_endpoint" {
value = aws_db_instance.rds.endpoint
}
output "db_name" {
value = var.db_name
}

View File

@@ -0,0 +1,43 @@
resource "aws_security_group" "rds_access" {
vpc_id = var.vpc_id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "random_id" "server" {
byte_length = 4
}
resource "aws_db_subnet_group" "rds_subnet" {
name_prefix = var.name
subnet_ids = flatten([var.vpc_subnet_ids ])
}
resource "aws_db_instance" "rds" {
allocated_storage = 20
db_subnet_group_name = aws_db_subnet_group.rds_subnet.name
vpc_security_group_ids = [ aws_security_group.rds_access.id ]
publicly_accessible = true
engine = "postgres"
engine_version = "11.5"
identifier = var.name
instance_class = "db.t2.micro"
name = var.db_name
username = var.db_username
password = var.db_password
skip_final_snapshot = true
#storage_encrypted = true
}

View File

@@ -0,0 +1,7 @@
variable "name" {}
variable "vpc_id" {}
variable "vpc_cidr" {}
variable "vpc_subnet_ids" {}
variable "db_name" {}
variable "db_username" {}
variable "db_password" {}

View File

@@ -0,0 +1,43 @@
resource "random_id" "name" {
byte_length = 4
}
data "aws_iam_policy_document" "assume_role" {
statement {
effect = "Allow"
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
data "aws_iam_policy_document" "instance" {
statement {
sid = "AllowSelfAssembly"
effect = "Allow"
resources = ["*"]
actions = [
"ec2:DescribeInstances",
]
}
}
resource "aws_iam_role" "instance" {
name = "hashicups-instance-role-${random_id.name.hex}"
assume_role_policy = data.aws_iam_policy_document.assume_role.json
}
resource "aws_iam_role_policy" "instance" {
name = "hashicups-instance-SelfAssembly-${random_id.name.hex}"
role = aws_iam_role.instance.id
policy = data.aws_iam_policy_document.instance.json
}
resource "aws_iam_instance_profile" "instance" {
name = "hashicups-instance-profile-${random_id.name.hex}"
role = aws_iam_role.instance.name
}

View File

@@ -0,0 +1,41 @@
terraform {
required_version = ">= 0.12"
}
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
#values = ["ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-*"]
values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"] # Canonical
}
data "template_file" "config" {
template = file("${path.module}/configs/${var.name}.tpl")
vars = {
upstream_ip = var.upstream_ip
}
}
resource "aws_instance" "instance" {
instance_type = "t2.small"
ami = data.aws_ami.ubuntu.id
vpc_security_group_ids = [ var.security_group_id ]
subnet_id = var.vpc_subnet_ids
associate_public_ip_address = true
key_name = var.public_key
iam_instance_profile = aws_iam_instance_profile.instance.id
private_ip = var.private_ip
tags = var.tags
user_data = data.template_file.config.rendered
}

View File

@@ -0,0 +1,7 @@
output "public_ip" {
value = aws_instance.instance.public_ip
}
output "private_ip" {
value = aws_instance.instance.private_ip
}

View File

@@ -0,0 +1,8 @@
# Inputs for the instance module. All are required (no defaults).
variable "name" {}              # instance name; also selects configs/<name>.tpl
variable "tags" {}              # map of tags applied to the instance
variable "vpc_id" {}
variable "vpc_subnet_ids" {}    # NOTE(review): consumed as a single subnet_id despite the plural name
variable "security_group_id" {}
variable "public_key" {}        # EC2 key-pair name used for SSH access
variable "private_ip" {}
variable "upstream_ip" {}       # injected into the user-data template

View File

@@ -0,0 +1,2 @@
# Workspace-level inputs; overridden per environment by TFC variables.
variable "region" { default = "us-west-2" }
variable "environment" { default = "not-defined" }

View File

@@ -0,0 +1,8 @@
# Sentinel configuration: shared helper functions plus the cost policy.
module "tfrun-functions" {
  source = "./common-functions/tfrun-functions/tfrun-functions.sentinel"
}
# soft-mandatory: a failed check can be overridden by an authorized user.
policy "limit-cost-by-workspace-name" {
  source = "./limit-cost-by-workspace-name.sentinel"
  enforcement_level = "soft-mandatory"
}

View File

@@ -0,0 +1,14 @@
# Policy set applied to every workspace in the organization (global = true),
# sourced from a VCS repo so policy changes flow in automatically.
resource "tfe_policy_set" "sentinel" {
  name          = "limit-cost-by-workspace-name"
  description   = "Limit costs with different limits based on workspace names."
  organization  = var.TFC_ORGANIZATION
  policies_path = "gitclones/sentinel-policies"
  global        = true
  vcs_repo {
    identifier         = var.GITHUB_IDENTIFIER
    branch             = "main"
    ingress_submodules = false
    oauth_token_id     = var.OAUTH_TOKEN_ID
  }
}

View File

@@ -0,0 +1,3 @@
# Required inputs; set as workspace variables in Terraform Cloud.
variable "GITHUB_IDENTIFIER" {} # "<org>/<repo>" of the policy repository
variable "TFC_ORGANIZATION" {}
variable "OAUTH_TOKEN_ID" {}    # id of an existing tfe_oauth_client token

View File

@@ -0,0 +1,157 @@
// VCS connection between the TFC organization and GitHub.
resource "tfe_oauth_client" "test-oauth-client" {
  organization     = var.TFC_ORGANIZATION
  api_url          = "https://api.github.com"
  http_url         = "https://github.com"
  oauth_token      = var.GITHUB_TOKEN
  service_provider = "github"
}
// One workspace per HashiCups environment (prod / staging / dev); each tracks
// a different branch of the same repository.
// NOTE(review): the three workspaces and their four variables each are
// near-identical — a for_each over an environment map would remove the
// duplication, but renaming resources moves their state addresses.
resource "tfe_workspace" "hashicups_prod" {
  name              = "hashicups-prod"
  organization      = var.TFC_ORGANIZATION
  auto_apply        = false
  queue_all_runs    = false
  terraform_version = "0.14.9"
  working_directory = "gitclones/hashicups-application/"
  vcs_repo {
    identifier     = var.GITHUB_IDENTIFIER
    branch         = "main"
    oauth_token_id = tfe_oauth_client.test-oauth-client.oauth_token_id
  }
}
resource "tfe_workspace" "hashicups_stage" {
  name              = "hashicups-staging"
  organization      = var.TFC_ORGANIZATION
  auto_apply        = false
  queue_all_runs    = false
  terraform_version = "0.14.9"
  working_directory = "gitclones/hashicups-application/"
  # VCS Section
  vcs_repo {
    identifier     = var.GITHUB_IDENTIFIER
    branch         = "staging"
    oauth_token_id = tfe_oauth_client.test-oauth-client.oauth_token_id
  }
}
resource "tfe_workspace" "hashicups_dev" {
  name              = "hashicups-dev"
  organization      = var.TFC_ORGANIZATION
  auto_apply        = false
  queue_all_runs    = false
  terraform_version = "0.14.9"
  working_directory = "gitclones/hashicups-application/"
  # VCS Section
  vcs_repo {
    identifier     = var.GITHUB_IDENTIFIER
    branch         = "dev"
    oauth_token_id = tfe_oauth_client.test-oauth-client.oauth_token_id
  }
}
// Per-workspace variables: AWS credentials (sensitive env vars) plus the
// Terraform-level region/environment inputs.
resource "tfe_variable" "prod_aws_access_key" {
  key          = "AWS_ACCESS_KEY_ID"
  value        = var.AWS_ACCESS_KEY_ID
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_prod.id
}
resource "tfe_variable" "prod_aws_secret_key" {
  key          = "AWS_SECRET_ACCESS_KEY"
  value        = var.AWS_SECRET_ACCESS_KEY
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_prod.id
}
// NOTE(review): resource name "prod-region" uses a hyphen while its siblings
// use underscores; renaming would change the state address, so left as-is.
resource "tfe_variable" "prod-region" {
  key          = "region"
  value        = "us-west-2"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_prod.id
}
resource "tfe_variable" "prod_environment" {
  key          = "environment"
  value        = "production"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_prod.id
}
resource "tfe_variable" "stage_aws_access_key" {
  key          = "AWS_ACCESS_KEY_ID"
  value        = var.AWS_ACCESS_KEY_ID
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_stage.id
}
resource "tfe_variable" "stage_aws_secret_key" {
  key          = "AWS_SECRET_ACCESS_KEY"
  value        = var.AWS_SECRET_ACCESS_KEY
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_stage.id
}
resource "tfe_variable" "stage_region" {
  key          = "region"
  value        = "us-west-2"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_stage.id
}
resource "tfe_variable" "stage_environment" {
  key          = "environment"
  value        = "stage"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_stage.id
}
resource "tfe_variable" "dev_aws_access_key" {
  key          = "AWS_ACCESS_KEY_ID"
  value        = var.AWS_ACCESS_KEY_ID
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_dev.id
}
resource "tfe_variable" "dev_aws_secret_key" {
  key          = "AWS_SECRET_ACCESS_KEY"
  value        = var.AWS_SECRET_ACCESS_KEY
  category     = "env"
  sensitive    = true
  workspace_id = tfe_workspace.hashicups_dev.id
}
resource "tfe_variable" "dev_region" {
  key          = "region"
  value        = "us-west-2"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_dev.id
}
resource "tfe_variable" "dev_environment" {
  key          = "environment"
  value        = "development"
  category     = "terraform"
  sensitive    = false
  workspace_id = tfe_workspace.hashicups_dev.id
}
// Exported so other configurations (e.g. the policy-set workspace) can reuse
// the same VCS connection.
output "oauth_token_id" {
  value = tfe_oauth_client.test-oauth-client.oauth_token_id
}

View File

@@ -0,0 +1,5 @@
# Credentials and repo coordinates; supply as sensitive workspace variables.
variable "AWS_ACCESS_KEY_ID" {}
variable "AWS_SECRET_ACCESS_KEY" {}
variable "TFC_ORGANIZATION" {}
variable "GITHUB_TOKEN" {}      # GitHub personal access token for the OAuth client
variable "GITHUB_IDENTIFIER" {} # "<org>/<repo>" of the application repository

View File

@@ -0,0 +1 @@
# Placeholder resource with no side effects; exists so runs have something to plan.
resource "null_resource" "foo" {}

View File

@@ -0,0 +1,4 @@
# hard-mandatory: a failing check blocks the run and cannot be overridden.
policy "enforce-mandatory-tags" {
  source = "./enforce-mandatory-tags.sentinel"
  enforcement_level = "hard-mandatory"
}

View File

@@ -0,0 +1,204 @@
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      # Pinned exactly; "=" means no automatic upgrades.
      version = "=2.60.0"
    }
  }
}
provider "azurerm" {
  # features {} is required by azurerm 2.x even when empty.
  features {}
}
# Resource group holding everything in this workshop deployment.
resource "azurerm_resource_group" "myresourcegroup" {
  name     = "${var.prefix}-workshop"
  location = var.location
  tags = {
    environment = "Production"
  }
}
# Single VNet with one subnet for the application VM.
resource "azurerm_virtual_network" "vnet" {
  name                = "${var.prefix}-vnet"
  location            = azurerm_resource_group.myresourcegroup.location
  address_space       = [var.address_space]
  resource_group_name = azurerm_resource_group.myresourcegroup.name
}
resource "azurerm_subnet" "subnet" {
  name                 = "${var.prefix}-subnet"
  virtual_network_name = azurerm_virtual_network.vnet.name
  resource_group_name  = azurerm_resource_group.myresourcegroup.name
  address_prefixes     = [var.subnet_prefix]
}
# Inbound rules for the app VM: HTTP, HTTPS and SSH open to the world.
# Priorities are 100/102/101 (lower wins); order of blocks here is cosmetic.
resource "azurerm_network_security_group" "catapp-sg" {
  name                = "${var.prefix}-sg"
  location            = var.location
  resource_group_name = azurerm_resource_group.myresourcegroup.name
  security_rule {
    name                       = "HTTP"
    priority                   = 100
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "80"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
  security_rule {
    name                       = "HTTPS"
    priority                   = 102
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "443"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
  # NOTE(review): SSH open to "*" is fine for a workshop, not for production.
  security_rule {
    name                       = "SSH"
    priority                   = 101
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}
# NIC for the VM; the public IP below is attached via ip_configuration.
resource "azurerm_network_interface" "catapp-nic" {
  name                = "${var.prefix}-catapp-nic"
  location            = var.location
  resource_group_name = azurerm_resource_group.myresourcegroup.name
  ip_configuration {
    name                          = "${var.prefix}ipconfig"
    subnet_id                     = azurerm_subnet.subnet.id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = azurerm_public_ip.catapp-pip.id
  }
}
# azurerm 2.x requires an explicit NIC <-> NSG association resource.
resource "azurerm_network_interface_security_group_association" "catapp-nic-sg-ass" {
  network_interface_id      = azurerm_network_interface.catapp-nic.id
  network_security_group_id = azurerm_network_security_group.catapp-sg.id
}
# Dynamic public IP with a DNS label; the FQDN is used by the provisioners.
resource "azurerm_public_ip" "catapp-pip" {
  name                = "${var.prefix}-ip"
  location            = var.location
  resource_group_name = azurerm_resource_group.myresourcegroup.name
  allocation_method   = "Dynamic"
  domain_name_label   = "${var.prefix}-meow"
}
# Ubuntu VM running the cat application; configured post-boot by the
# null_resource provisioners below.
resource "azurerm_virtual_machine" "catapp" {
  name                = "${var.prefix}-meow"
  location            = var.location
  resource_group_name = azurerm_resource_group.myresourcegroup.name
  vm_size             = var.vm_size

  network_interface_ids = [azurerm_network_interface.catapp-nic.id]
  # Fixed: this argument is a bool; previously the string "true" was passed
  # (HCL coerces it, but the typed literal is correct and lint-clean).
  delete_os_disk_on_termination = true

  storage_image_reference {
    publisher = var.image_publisher
    offer     = var.image_offer
    sku       = var.image_sku
    version   = var.image_version
  }

  storage_os_disk {
    name              = "${var.prefix}-osdisk"
    managed_disk_type = "Standard_LRS"
    caching           = "ReadWrite"
    create_option     = "FromImage"
  }

  os_profile {
    computer_name  = var.prefix
    admin_username = var.admin_username
    admin_password = var.admin_password
  }

  # Password auth must stay enabled: the provisioners connect over SSH
  # with admin_password.
  os_profile_linux_config {
    disable_password_authentication = false
  }

  tags = {}

  # Added to allow destroy to work correctly.
  depends_on = [azurerm_network_interface_security_group_association.catapp-nic-sg-ass]
}
# We're using a little trick here so we can run the provisioner without
# destroying the VM. Do not do this in production.
# If you need ongoing management (Day N) of your virtual machines a tool such
# as Chef or Puppet is a better choice. These tools track the state of
# individual files and can keep them in the correct configuration.
# Here we do the following steps:
# Sync everything in files/ to the remote VM.
# Set up some environment variables for our script.
# Add execute permissions to our scripts.
# Run the deploy_app.sh script.
# Post-boot configuration of the VM over SSH. Lives in a null_resource (not
# in the VM resource) so re-provisioning does not destroy the VM.
resource "null_resource" "configure-cat-app" {
  depends_on = [
    azurerm_virtual_machine.catapp,
  ]
  # Terraform 0.11
  # triggers {
  #   build_number = "${timestamp()}"
  # }
  # Terraform 0.12
  # timestamp() changes every run, so the provisioners re-run on every apply.
  triggers = {
    build_number = timestamp()
  }
  # Copy the app payload (files/) to the admin user's home directory.
  provisioner "file" {
    source      = "files/"
    destination = "/home/${var.admin_username}/"
    connection {
      type     = "ssh"
      user     = var.admin_username
      password = var.admin_password
      host     = azurerm_public_ip.catapp-pip.fqdn
    }
  }
  # Install Apache, deploy the app via deploy_app.sh, then celebrate.
  provisioner "remote-exec" {
    inline = [
      "sudo apt -y update",
      "sleep 15",
      "sudo apt -y update",
      "sudo apt -y install apache2",
      "sudo systemctl start apache2",
      "sudo chown -R ${var.admin_username}:${var.admin_username} /var/www/html",
      "chmod +x *.sh",
      "PLACEHOLDER=${var.placeholder} WIDTH=${var.width} HEIGHT=${var.height} PREFIX=${var.prefix} ./deploy_app.sh",
      "sudo apt -y install cowsay",
      "cowsay Mooooooooooo!",
    ]
    connection {
      type     = "ssh"
      user     = var.admin_username
      password = var.admin_password
      host     = azurerm_public_ip.catapp-pip.fqdn
    }
  }
}

View File

@@ -0,0 +1,76 @@
provider "aws" {
  region = var.region
}
# AZs usable without opt-in, for spreading the VPC subnets.
data "aws_availability_zones" "available" {
  state = "available"
  filter {
    name   = "opt-in-status"
    values = ["opt-in-not-required"]
  }
}
# VPC with three public + three private subnets, single NAT gateway, and the
# kubernetes.io tags EKS needs for ELB placement.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.65.0"
  name                 = "${var.datacenter_name}-vpc"
  cidr                 = "10.0.0.0/16"
  azs                  = data.aws_availability_zones.available.names
  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true
  public_subnet_tags = {
    "kubernetes.io/cluster/${var.datacenter_name}-public-subnet" = "shared"
    "kubernetes.io/role/elb"                                     = "1"
  }
  private_subnet_tags = {
    "kubernetes.io/cluster/${var.datacenter_name}-private-subnet" = "shared"
    "kubernetes.io/role/internal-elb"                             = "1"
  }
}
# EKS cluster with one managed node group of three t2.medium nodes.
# The kubeconfig is written to <output_dir>/<datacenter_name>.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "13.2.1"
  cluster_name    = var.datacenter_name
  cluster_version = "1.17"
  subnets         = module.vpc.public_subnets
  vpc_id          = module.vpc.vpc_id
  node_groups = {
    first = {
      desired_capacity = 3
      max_capacity     = 3
      min_capacity     = 3
      instance_type    = "t2.medium"
    }
  }
  manage_aws_auth    = false
  write_kubeconfig   = true
  config_output_path = pathexpand("${var.output_dir}/${var.datacenter_name}")
}
data "aws_eks_cluster" "cluster" {
  name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}
# Kubernetes provider authenticated directly against the new cluster.
# load_config_file is valid for the pinned 1.13.x provider (removed in 2.x).
provider "kubernetes" {
  load_config_file       = false
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

View File

@@ -0,0 +1,113 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
# Each block pins one provider's selected version, its version constraints,
# and the checksums (h1: ziphash, zh: package hashes) used to verify it.
provider "registry.terraform.io/hashicorp/aws" {
  version     = "3.23.0"
  constraints = ">= 2.68.0, >= 3.3.0, 3.23.0"
  hashes = [
    "h1:tSznQxPJvolDnmqqaTK9SsJ0bluTws7OAWcnc1t0ABs=",
    "zh:30b0733027c00472618da998bc77967c692e238ae117c07e046fdd7336b83fa3",
    "zh:3677550a8bef8e01c67cb615407dc8a69d32f4e36017033cd6f71a831c99d5de",
    "zh:3c2fb4c14bfd43cf20ee25d0068ce09f1d48758408b8f1c88a096cea243612b3",
    "zh:5577543322003693c4fe24a69ed0d47e58f867426fd704fac94cf5c16d3d6153",
    "zh:6771f09d76ad01ffc04baa3bce7a3eed09f6a8a949274ffbd9d11756a58a4329",
    "zh:7a57b79d304d17cf52ee3ddce91679f6b4289c5bdda2e31b763bf7d512e542d9",
    "zh:815fb027e17bfe754b05367d20bd0694726a95a99b81e8d939ddd44e2b1f05a9",
    "zh:a3d67db5ec0f4e9750eb19676a9a1aff36b0721e276a4ba789f42b991bf5951c",
    "zh:cd67ff33860ad578172c19412ce608ba818e7590083197df2b793f870d6f50a3",
    "zh:fbe0835055d1260fb77ad19a32a8726248ba7ac187f6c463ded90737b4cea8e6",
  ]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
  version     = "1.13.3"
  constraints = ">= 1.11.1, 1.13.3"
  hashes = [
    "h1:iV0RArGM0KJ3n0ctwpNzcTkTAnx5G4wV3V3kYkRpJXU=",
    "zh:11fd58df9c297a4f3cab82ce8eb2f54f1be27f78fa23be2273ecd545ab254b91",
    "zh:5b9e6f352c5666d791e2658a1d18bf0990f3ab70c99c916c393a2ee7f385364c",
    "zh:5c94f1350471a5c8e8ee6675874608c506a0bfd3164bdd91b802842723547e2c",
    "zh:5d9c5c44dba9addbb86491339012096e74778bb4ea93b70f12333bffba3d05e6",
    "zh:6336f9cbb0b580f247cebb97fb7d4cc5e7fe9cc734d8d958d84c4ea3f1e24041",
    "zh:bca3b9d4dcbe6f804f5611a83add371dc03b5aa92271f60ebdc2216bfedfab28",
    "zh:cbcdc87a593090f490f7899f4f2d302e0c7023155591fcf65e6fadd69f5452f0",
    "zh:ec2886a1adbfe3c861b2deb9446369111b9c6116701ae73ef372dc7df5bb3c9e",
    "zh:edb5b4172610672bb4d7425511961fda2047b8a00675b99ae6887cd2ece4bda9",
    "zh:ff7ea7743246181ea739643d7751c37041c4016eb6bbc39beb1e3b4e99629112",
  ]
}
provider "registry.terraform.io/hashicorp/local" {
  version     = "2.1.0"
  constraints = ">= 1.4.0"
  hashes = [
    "h1:KfieWtVyGWwplSoLIB5usKAUnrIkDQBkWaR5TI+4WYg=",
    "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2",
    "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab",
    "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3",
    "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a",
    "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe",
    "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1",
    "zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c",
    "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4",
    "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b",
    "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3",
    "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91",
  ]
}
provider "registry.terraform.io/hashicorp/null" {
  version     = "3.1.0"
  constraints = ">= 2.1.0"
  hashes = [
    "h1:xhbHC6in3nQryvTQBWKxebi3inG5OCgHgc4fRxL0ymc=",
    "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2",
    "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515",
    "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521",
    "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2",
    "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e",
    "zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53",
    "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d",
    "zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8",
    "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70",
    "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b",
    "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e",
  ]
}
provider "registry.terraform.io/hashicorp/random" {
  version     = "3.1.0"
  constraints = ">= 2.1.0"
  hashes = [
    "h1:rKYu5ZUbXwrLG1w81k7H3nce/Ys6yAxXhWcbtk36HjY=",
    "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc",
    "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626",
    "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff",
    "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2",
    "zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992",
    "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427",
    "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc",
    "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f",
    "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b",
    "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7",
    "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a",
  ]
}
provider "registry.terraform.io/hashicorp/template" {
  version     = "2.2.0"
  constraints = ">= 2.1.0"
  hashes = [
    "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=",
    "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
    "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
    "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
    "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
    "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
    "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
    "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
    "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
    "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
    "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
  ]
}

View File

@@ -0,0 +1,7 @@
# First (and only) datacenter instance of the shared EKS environment module.
module "dc1" {
  source          = "../environments/terraform/eks"
  datacenter_name = "layer7-observability-dc1"
  region          = "us-west-2"
  output_dir      = "./" # kubeconfig is written here by the module
}

View File

@@ -0,0 +1,8 @@
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      # Exact pin; bump deliberately together with the lock file.
      version = "3.23.0"
    }
  }
}

View File

@@ -0,0 +1,129 @@
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
    }
  }
}
provider "aws" {
  region = var.aws_region
}
data "aws_availability_zones" "available" {
  state = "available"
}
# VPC sized by the *_subnet_count variables: slice() takes the first N CIDRs
# from the corresponding list variables.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.64.0"
  name = "vpc-${var.resource_tags["project"]}-${var.resource_tags["environment"]}"
  cidr = var.vpc_cidr_block
  azs             = data.aws_availability_zones.available.names
  private_subnets = slice(var.private_subnet_cidr_blocks, 0, var.private_subnet_count)
  public_subnets  = slice(var.public_subnet_cidr_blocks, 0, var.public_subnet_count)
  enable_nat_gateway = true
  enable_vpn_gateway = var.enable_vpn_gateway
  tags = var.resource_tags
}
# Web-server SG: HTTP open only to traffic originating inside the VPC's
# public subnets (i.e. the load balancer).
module "app_security_group" {
  source  = "terraform-aws-modules/security-group/aws//modules/web"
  version = "3.17.0"
  name        = "web-sg-${var.resource_tags["project"]}-${var.resource_tags["environment"]}"
  description = "Security group for web-servers with HTTP ports open within VPC"
  vpc_id      = module.vpc.vpc_id
  ingress_cidr_blocks = module.vpc.public_subnets_cidr_blocks
  tags = var.resource_tags
}
# Load-balancer SG: HTTP open to the internet.
module "lb_security_group" {
  source  = "terraform-aws-modules/security-group/aws//modules/web"
  version = "3.17.0"
  name        = "lb-sg-${var.resource_tags["project"]}-${var.resource_tags["environment"]}"
  description = "Security group for load balancer with HTTP ports open within VPC"
  vpc_id      = module.vpc.vpc_id
  ingress_cidr_blocks = ["0.0.0.0/0"]
  tags = var.resource_tags
}
resource "random_string" "lb_id" {
  length  = 3
  special = false
}
# Classic ELB fronting the app instances.
# NOTE(review): `aws_instance.app.*.id` is the legacy splat form; the
# equivalent modern spelling is aws_instance.app[*].id.
module "elb_http" {
  source  = "terraform-aws-modules/elb/aws"
  version = "2.4.0"
  # Ensure load balancer name is unique
  name = "lb-${random_string.lb_id.result}-${var.resource_tags["project"]}-${var.resource_tags["environment"]}"
  internal = false
  security_groups = [module.lb_security_group.this_security_group_id]
  subnets         = module.vpc.public_subnets
  number_of_instances = length(aws_instance.app)
  instances           = aws_instance.app.*.id
  listener = [{
    instance_port     = "80"
    instance_protocol = "HTTP"
    lb_port           = "80"
    lb_protocol       = "HTTP"
  }]
  health_check = {
    target              = "HTTP:80/index.html"
    interval            = 10
    healthy_threshold   = 3
    unhealthy_threshold = 10
    timeout             = 5
  }
  tags = var.resource_tags
}
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]
  filter {
    name   = "name"
    values = ["amzn2-ami-hvm-*-x86_64-gp2"]
  }
}
# App fleet: instance_count servers spread round-robin over the private
# subnets, each serving a static page via user-data-installed Apache.
resource "aws_instance" "app" {
  count = var.instance_count
  ami           = data.aws_ami.amazon_linux.id
  instance_type = var.ec2_instance_type
  subnet_id              = module.vpc.private_subnets[count.index % length(module.vpc.private_subnets)]
  vpc_security_group_ids = [module.app_security_group.this_security_group_id]
  user_data = <<-EOF
    #!/bin/bash
    sudo yum update -y
    sudo yum install -y amazon-linux-extras
    sudo amazon-linux-extras enable httpd_modules
    sudo yum install httpd -y
    sudo systemctl enable httpd
    sudo systemctl start httpd
    echo "<html><body><div>Hello, world!</div></body></html>" > /var/www/html/index.html
    EOF
  tags = var.resource_tags
}

View File

@@ -0,0 +1,4 @@
# Entry point for the deployed application.
output "public_dns_name" {
  description = "Public DNS names of the load balancer for this project"
  value       = module.elb_http.this_elb_dns_name
}

View File

@@ -0,0 +1,82 @@
# Variable declarations
variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "us-east-2"
}
variable "vpc_cidr_block" {
  description = "CIDR block for VPC"
  type        = string
  default     = "10.0.0.0/16"
}
variable "instance_count" {
  description = "Number of instances to provision."
  type        = number
  default     = 2
}
variable "enable_vpn_gateway" {
  description = "Enable a VPN gateway in your VPC."
  type        = bool
  default     = false
}
# The subnet counts select how many CIDRs are sliced from the lists below;
# each must be <= the length of its corresponding *_cidr_blocks list (8).
variable "public_subnet_count" {
  description = "Number of public subnets."
  type        = number
  default     = 2
}
variable "private_subnet_count" {
  description = "Number of private subnets."
  type        = number
  default     = 2
}
variable "public_subnet_cidr_blocks" {
  description = "Available cidr blocks for public subnets"
  type        = list(string)
  default = [
    "10.0.1.0/24",
    "10.0.2.0/24",
    "10.0.3.0/24",
    "10.0.4.0/24",
    "10.0.5.0/24",
    "10.0.6.0/24",
    "10.0.7.0/24",
    "10.0.8.0/24",
  ]
}
variable "private_subnet_cidr_blocks" {
  description = "Available cidr blocks for private subnets"
  type        = list(string)
  default = [
    "10.0.101.0/24",
    "10.0.102.0/24",
    "10.0.103.0/24",
    "10.0.104.0/24",
    "10.0.105.0/24",
    "10.0.106.0/24",
    "10.0.107.0/24",
    "10.0.108.0/24",
  ]
}
# Applied to every resource; "project" and "environment" keys are also
# interpolated into resource names, so both must be present.
variable "resource_tags" {
  description = "Tags to set for all resources"
  type        = map(string)
  default = {
    project     = "my-project",
    environment = "dev"
  }
}
variable "ec2_instance_type" {
  description = "AWS EC2 instance type."
  type        = string
  default     = "t2.micro"
}

View File

@@ -0,0 +1,21 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
# Pins the aws provider version and records its verification checksums.
provider "registry.terraform.io/hashicorp/aws" {
  version     = "3.42.0"
  constraints = "~> 3.27"
  hashes = [
    "h1:C6/yDp6BhuDFx0qdkBuJj/OWUJpAoraHTJaU6ac38Rw=",
    "zh:126c856a6eedddd8571f161a826a407ba5655a37a6241393560a96b8c4beca1a",
    "zh:1a4868e6ac734b5fc2e79a4a889d176286b66664aad709435aa6acee5871d5b0",
    "zh:40fed7637ab8ddeb93bef06aded35d970f0628025b97459ae805463e8aa0a58a",
    "zh:68def3c0a5a1aac1db6372c51daef858b707f03052626d3427ac24cba6f2014d",
    "zh:6db7ec9c8d1803a0b6f40a664aa892e0f8894562de83061fa7ac1bc51ff5e7e5",
    "zh:7058abaad595930b3f97dc04e45c112b2dbf37d098372a849081f7081da2fb52",
    "zh:8c25adb15a19da301c478aa1f4a4d8647cabdf8e5dae8331d4490f80ea718c26",
    "zh:8e129b847401e39fcbc54817726dab877f36b7f00ff5ed76f7b43470abe99ff9",
    "zh:d268bb267a2d6b39df7ddee8efa7c1ef7a15cf335dfa5f2e64c9dae9b623a1b8",
    "zh:d6eeb3614a0ab50f8e9ab5666ae5754ea668ce327310e5b21b7f04a18d7611a8",
    "zh:f5d3c58055dff6e38562b75d3edc908cb2f1e45c6914f6b00f4773359ce49324",
  ]
}

View File

@@ -0,0 +1,35 @@
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.27"
    }
  }
  required_version = "~> 0.15.4"
}
# Credentials come from the local AWS "default" profile.
provider "aws" {
  profile = "default"
  region  = var.region
}
# Latest Amazon Linux 2 (EBS-backed) AMI.
data "aws_ami" "amazon_linux" {
  owners      = ["amazon"]
  most_recent = true
  filter {
    name   = "name"
    values = ["amzn2-ami-hvm-*-x86_64-ebs"]
  }
}
# Minimal demo instance used by the "Learn Refresh" tutorial.
resource "aws_instance" "server" {
  ami           = data.aws_ami.amazon_linux.id
  instance_type = "t2.micro"
  tags = {
    Name = "Learn Refresh"
  }
}

View File

@@ -0,0 +1,4 @@
variable "region" {
  description = "AWS region in which to provision infrastructure"
  default     = "us-east-2"
}

View File

@@ -0,0 +1,71 @@
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.24.1"
    }
  }
  required_version = "~> 0.15"
  # Remote state in Terraform Cloud.
  # NOTE(review): "<YOUR-ORGANIZATION-NAME>" is a tutorial placeholder that
  # must be replaced before `terraform init`.
  backend "remote" {
    hostname     = "app.terraform.io"
    organization = "<YOUR-ORGANIZATION-NAME>"
    workspaces {
      name = "state-versioning"
    }
  }
}
provider "aws" {
  region = var.region
}
# Latest Ubuntu 20.04 (focal) AMI published by Canonical.
data "aws_ami" "ubuntu" {
  most_recent = true
  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }
  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
  owners = ["099720109477"] # Canonical
}
# Demo instance serving "Hello, World" on port 8080 via busybox httpd.
resource "aws_instance" "example" {
  ami                    = data.aws_ami.ubuntu.id
  instance_type          = "t2.micro"
  vpc_security_group_ids = [aws_security_group.sg_web.id]
  user_data = <<-EOF
              #!/bin/bash
              echo "Hello, World" > index.html
              nohup busybox httpd -f -p 8080 &
              EOF
  tags = {
    Name = "terraform-learn-state-versioning"
  }
}
resource "aws_security_group" "sg_web" {
  name        = "sg_web"
  description = "allow 8080"
}
# Rule kept separate from the SG so it can be replaced without recreating
# the group (create_before_destroy avoids a window with no rule).
resource "aws_security_group_rule" "sg_web" {
  type              = "ingress"
  to_port           = "8080"
  from_port         = "8080"
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.sg_web.id
  lifecycle {
    create_before_destroy = true
  }
}

View File

@@ -0,0 +1,128 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
# Each block pins one provider's selected version, its version constraints,
# and the checksums (h1: ziphash, zh: package hashes) used to verify it.
provider "registry.terraform.io/hashicorp/aws" {
  version     = "3.39.0"
  constraints = "~> 3.39"
  hashes = [
    "h1:fjlp3Pd3QsTLghNm7TUh/KnEMM2D3tLb7jsDLs8oWUE=",
    "zh:2014b397dd93fa55f2f2d1338c19e5b2b77b025a76a6b1fceea0b8696e984b9c",
    "zh:23d59c68ab50148a0f5c911a801734e9934a1fccd41118a8efb5194135cbd360",
    "zh:412eab41d4934ca9c47083faa128e4cd585c3bb44ad718e40d67091aebc02f4e",
    "zh:4b75e0a259b56d97e66b7d69f3f25bd4cc7be2440c0fe35529f46de7d40a49d3",
    "zh:694a32519dcca5bd8605d06481d16883d55160d97c1f4039deb13c6ca8de8427",
    "zh:6a0bcef43c2d9a97aeaaac3c5d1d6728dc2464a51a014f118c691c79029d0903",
    "zh:6d78fc7c663247ca2a80f276008dcdafece4cac75e2639bbce188c08b796040a",
    "zh:78f846a505d7b64b67feed1527d4d2b40130dadaf8e3112113685e148f49b156",
    "zh:881bc969432d3ef6ec70f5a762c3415e037904338579b0a360c6818b74d26e59",
    "zh:96c1ca80c1d693a3eef80489adb45c076ee8e6878e461d6c29b05388d4b95f48",
    "zh:9be5fa342272586fc6e319e20f21c0c5c801b05dcf7d59e473ad0882c9ecfa70",
  ]
}
provider "registry.terraform.io/hashicorp/http" {
  version = "2.1.0"
  hashes = [
    "h1:GYoVrTtiSAE3AlP1fad3fFmHoPaXAPhm/DJyMcVCwZA=",
    "zh:03d82dc0887d755b8406697b1d27506bc9f86f93b3e9b4d26e0679d96b802826",
    "zh:0704d02926393ddc0cfad0b87c3d51eafeeae5f9e27cc71e193c141079244a22",
    "zh:095ea350ea94973e043dad2394f10bca4a4bf41be775ba59d19961d39141d150",
    "zh:0b71ac44e87d6964ace82979fc3cbb09eb876ed8f954449481bcaa969ba29cb7",
    "zh:0e255a170db598bd1142c396cefc59712ad6d4e1b0e08a840356a371e7b73bc4",
    "zh:67c8091cfad226218c472c04881edf236db8f2dc149dc5ada878a1cd3c1de171",
    "zh:75df05e25d14b5101d4bc6624ac4a01bb17af0263c9e8a740e739f8938b86ee3",
    "zh:b4e36b2c4f33fdc44bf55fa1c9bb6864b5b77822f444bd56f0be7e9476674d0e",
    "zh:b9b36b01d2ec4771838743517bc5f24ea27976634987c6d5529ac4223e44365d",
    "zh:ca264a916e42e221fddb98d640148b12e42116046454b39ede99a77fc52f59f4",
    "zh:fe373b2fb2cc94777a91ecd7ac5372e699748c455f44f6ea27e494de9e5e6f92",
  ]
}
provider "registry.terraform.io/hashicorp/local" {
  version = "2.1.0"
  hashes = [
    "h1:KfieWtVyGWwplSoLIB5usKAUnrIkDQBkWaR5TI+4WYg=",
    "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2",
    "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab",
    "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3",
    "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a",
    "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe",
    "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1",
    "zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c",
    "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4",
    "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b",
    "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3",
    "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91",
  ]
}
provider "registry.terraform.io/hashicorp/nomad" {
  version     = "1.4.14"
  constraints = "~> 1.4.14"
  hashes = [
    "h1:l64RWHz6dek0+kx0ZVvbCjAjqQfOVq7QnSmvLqLEYOI=",
    "zh:036cc8e0c1c6c2f91573149910eca29a7107b3415536eabeb2581861525da64a",
    "zh:1414e2deb87af66a47e44ab5472b4606294cf511722beae2c0a3680041d66635",
    "zh:623184a22b347fa5b696d3fbee35f5bff9ed30fbc8b067715c52b6300d655789",
    "zh:7a026a57148a7c2e8a08a83c3641898911a7d9998c38eb2c6ca634107ccf49f9",
    "zh:87d34e879284453b2ac825f8bb9c88c85027d404b1b9fa445ec97b519dfa59cb",
    "zh:90591119307c2f3dd15a6a78964731689444fb1ce3d393eddf83e05a2f187b80",
    "zh:b2cbf5e4d4f2d500804e7f1968b3fd2cebd4b164ccf76d7cb2c99ed1eb23957e",
    "zh:d5f19ab3d0d172be8af098bb62b47667c632af736c60d1acab0fc1c31dbbcb99",
    "zh:ee5f7f75a642eed607d4824b5888e4aacfc4dd435d54d9523d8f8165695d52a1",
    "zh:f6300309339221a5f0863bec32d96b38a8e545c5a87b43c5bb8c65d2ff0492ed",
  ]
}
provider "registry.terraform.io/hashicorp/null" {
  version = "3.1.0"
  hashes = [
    "h1:xhbHC6in3nQryvTQBWKxebi3inG5OCgHgc4fRxL0ymc=",
    "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2",
    "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515",
    "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521",
    "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2",
    "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e",
    "zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53",
    "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d",
    "zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8",
    "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70",
    "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b",
    "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e",
  ]
}
provider "registry.terraform.io/hashicorp/random" {
  version = "3.1.0"
  hashes = [
    "h1:rKYu5ZUbXwrLG1w81k7H3nce/Ys6yAxXhWcbtk36HjY=",
    "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc",
    "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626",
    "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff",
    "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2",
    "zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992",
    "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427",
    "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc",
    "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f",
    "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b",
    "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7",
    "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a",
  ]
}
provider "registry.terraform.io/hashicorp/template" {
  version = "2.2.0"
  hashes = [
    "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=",
    "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
    "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
    "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
    "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
    "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
    "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
    "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
    "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
    "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
    "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
  ]
}

View File

@@ -0,0 +1,97 @@
locals {
client_load_balancers = [
{
name = "platform"
listeners = [
{
port = 8081
protocol = "TCP"
},
{
port = 9090
protocol = "TCP"
},
{
port = 3000
protocol = "TCP"
},
],
},
]
allowed_ips = length(var.allowed_ips) == 0 ? ["${chomp(data.http.my_ip.body)}/32"] : formatlist("%s/32", var.allowed_ips)
}
data "http" "my_ip" {
url = "https://icanhazip.com/"
}
resource "random_pet" "stack_name" {}
# AMI used by servers and clients. Per var.ami_id's contract, an empty
# ami_id means the module builds a fresh image (via Packer); otherwise the
# given AMI is looked up and reused.
module "image" {
source = "../../../infrastructure/aws/terraform/modules/aws-nomad-image"
depends_on = [null_resource.preflight_check]
ami_id = var.ami_id
region = var.region
vpc_id = module.network.vpc_id
subnet_id = module.network.subnet_id
stack_name = random_pet.stack_name.id
owner_name = var.owner_name
owner_email = var.owner_email
}
# Nomad/Consul server instances, launched from the shared AMI into the
# network module's subnet with the common agents security group.
module "servers" {
source = "../../../infrastructure/aws/terraform/modules/aws-nomad-servers"
depends_on = [null_resource.preflight_check]
stack_name = random_pet.stack_name.id
ami_id = module.image.id
key_name = var.key_name
owner_name = var.owner_name
owner_email = var.owner_email
subnet_ids = [module.network.subnet_id]
security_group_ids = [module.network.agents_sg_id]
}
# Nomad clients for the "platform" datacenter: attached to the first client
# load balancer and given the first client security group in addition to the
# shared agents security group.
module "clients_platform" {
source = "../../../infrastructure/aws/terraform/modules/aws-nomad-clients"
depends_on = [null_resource.preflight_check]
nomad_datacenter = "platform"
stack_name = random_pet.stack_name.id
ami_id = module.image.id
key_name = var.key_name
owner_name = var.owner_name
owner_email = var.owner_email
security_group_ids = [module.network.agents_sg_id, module.network.clients_sg_ids[0]]
subnet_ids = [module.network.subnet_id]
load_balancer_names = [module.network.clients_lb_names[0]]
}
# Nomad clients for the "batch_workers" datacenter. Starts at zero capacity;
# its ASG is grown/shrunk by the autoscaler Nomad job (nomad_job.autoscaler
# references module.clients_batch.asg_name).
module "clients_batch" {
source = "../../../infrastructure/aws/terraform/modules/aws-nomad-clients"
depends_on = [null_resource.preflight_check]
nomad_datacenter = "batch_workers"
desired_capacity = 0
stack_name = random_pet.stack_name.id
ami_id = module.image.id
key_name = var.key_name
owner_name = var.owner_name
owner_email = var.owner_email
security_group_ids = [module.network.agents_sg_id]
subnet_ids = [module.network.subnet_id]
}
# Networking: VPC, subnet, security groups and client load balancers. Also
# wires the server instance IDs and the computed IP allow-list into the
# network-level resources.
module "network" {
source = "../../../infrastructure/aws/terraform/modules/aws-nomad-network"
depends_on = [null_resource.preflight_check]
stack_name = random_pet.stack_name.id
owner_name = var.owner_name
owner_email = var.owner_email
server_ids = module.servers.ids
allowed_ips = local.allowed_ips
client_load_balancers = local.client_load_balancers
}

View File

@@ -0,0 +1,55 @@
# Terraform settings for this root configuration.
# Declares every provider this configuration actually uses so that
# `terraform init` records version selections for all of them in the
# dependency lock file, instead of relying on implicit hashicorp/* defaults.
terraform {
  required_version = ">= 0.13"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.39"
    }
    nomad = {
      source  = "hashicorp/nomad"
      version = "~> 1.4.14"
    }
    # Previously undeclared but used by this configuration:
    # provider "http" (public IP lookup), null_resource.preflight_check,
    # and random_pet.stack_name.
    http = {
      source = "hashicorp/http"
    }
    null = {
      source = "hashicorp/null"
    }
    random = {
      source = "hashicorp/random"
    }
  }
}
# Providers.
# http: used only for the data "http" public-IP lookup; needs no settings.
provider "http" {}
provider "aws" {
region = var.region
}
# Nomad provider pointed at the cluster address exported by the network
# module. NOTE(review): configuring a provider from a module output can be
# fragile on first apply / destroy ordering — confirm this works end-to-end.
provider "nomad" {
address = module.network.nomad_addr
}
# Pre-flight check.
# Fails fast if any of the required CLIs (aws, packer, nomad) is missing on
# the machine running Terraform; everything else depends_on this resource.
# NOTE(review): a null_resource provisioner only runs when the resource is
# first created, so the check is not repeated on subsequent applies.
resource "null_resource" "preflight_check" {
provisioner "local-exec" {
command = <<EOF
aws --version && \
packer --version && \
nomad --version
EOF
}
}
# Nomad jobs.
# Shared jobs registered against the cluster via the nomad-jobs module.
module "nomad_jobs" {
source = "../shared/terraform/modules/nomad-jobs"
depends_on = [null_resource.preflight_check]
nomad_addr = module.network.nomad_addr
}
# Renders the autoscaler jobspec template with the batch clients' ASG name
# so the autoscaler can scale the "batch_workers" capacity, then registers it.
resource "nomad_job" "autoscaler" {
depends_on = [null_resource.preflight_check, module.nomad_jobs]
jobspec = templatefile(
"${path.module}/jobs/autoscaler.nomad.tpl",
{
aws_region = var.region,
aws_asg_name = module.clients_batch.asg_name,
}
)
}

View File

@@ -0,0 +1,21 @@
# Required variables.
# Types and descriptions added for consistency with the module-level
# variable declarations used elsewhere in this project.
variable "key_name" {
  type        = string
  description = "Name of an existing EC2 key pair used for SSH access to the instances."
}
variable "owner_name" {
  type        = string
  description = "The name used to identify the owner of the provisioned resources. It will be stored in a tag called OwnerName."
}
variable "owner_email" {
  type        = string
  description = "The email used to contact the owner of the provisioned resources. It will be stored in a tag called OwnerEmail."
}
variable "region" {
  type        = string
  description = "AWS region in which to provision the infrastructure."
}
# Optional variables.
variable "ami_id" {
  # Explicit type for consistency with the modules' ami_id declaration.
  type        = string
  default     = ""
  description = "AMI ID to use to provision instances. If left empty, a new image will be created."
}
variable "allowed_ips" {
  # BUG FIX: the default was "" (a string). locals.allowed_ips applies
  # formatlist("%s/32", var.allowed_ips), which requires a list, so a
  # non-empty string value could never work. An empty list is the correct
  # "not provided" sentinel — length([]) == 0 still selects the
  # caller-IP fallback, so default behavior is unchanged.
  type        = list(string)
  default     = []
  description = "List of IP addresses allowed to access the infrastructure. If left empty, only the IP of the machine running Terraform will be allowed."
}
variable "vpc_cidr" {
  # Description was empty; filled in from the variable's name and default.
  type        = string
  default     = "172.31.0.0/16"
  description = "CIDR block to use for the VPC."
}

View File

@@ -0,0 +1,59 @@
# Packer build inputs. These are supplied via -var flags by the
# aws-nomad-image Terraform module's `packer build` local-exec command.
variable "owner_email" {}
variable "owner_name" {}
variable "region" {}
variable "vpc_id" {}
variable "subnet_id" {}
# stack_name doubles as the resulting AMI's name.
variable "stack_name" {}
# EBS-backed AMI build, named after the stack, based on the most recent
# Canonical Ubuntu 20.04 (Focal) amd64 server image.
source "amazon-ebs" "hashistack" {
ami_name = var.stack_name
region = var.region
subnet_id = var.subnet_id
vpc_id = var.vpc_id
instance_type = "t2.medium"
# Public IP so Packer's SSH communicator can reach the build instance.
associate_public_ip_address = true
source_ami_filter {
filters = {
virtualization-type = "hvm"
name = "ubuntu/images/*ubuntu-focal-20.04-amd64-server-*"
root-device-type = "ebs"
}
owners = ["099720109477"] # Canonical's owner ID
most_recent = true
}
communicator = "ssh"
ssh_username = "ubuntu"
tags = {
OS = "Ubuntu"
Release = "20.04"
Architecture = "amd64"
OwnerName = var.owner_name
OwnerEmail = var.owner_email
}
}
build {
sources = [
"source.amazon-ebs.hashistack"
]
# Create a staging directory writable by the (non-root) ssh user.
provisioner "shell" {
inline = [
"sudo mkdir -p /ops",
"sudo chmod 777 /ops"
]
}
# Copy the shared provisioning assets onto the instance, then run the
# setup script from that same shared tree.
provisioner "file" {
source = "../../shared/packer/"
destination = "/ops"
}
provisioner "shell" {
script = "../../shared/packer/scripts/setup.sh"
}
}

View File

@@ -0,0 +1,62 @@
locals {
  # Resource name prefix: "<stack name>-<nomad node class>".
  name_prefix = format("%s-%s", var.stack_name, var.nomad_node_class)

  # Simplified from `var.load_balancer_names != [""] ? var.load_balancer_names : [""]`,
  # which was a tautology: when the input equals [""] the else-branch yields
  # that same [""], and otherwise the input is returned unchanged — i.e. the
  # expression always evaluated to the input. Behavior is identical.
  load_balancers = var.load_balancer_names
}
# Launch template for Nomad client instances; instantiated by the
# autoscaling group below.
resource "aws_launch_template" "clients" {
name_prefix = local.name_prefix
image_id = var.ami_id
instance_type = var.instance_type
key_name = var.key_name
vpc_security_group_ids = var.security_group_ids
# Boot-time configuration rendered from data.template_file.user_data,
# which is declared elsewhere in this module (not visible here).
user_data = base64encode(data.template_file.user_data.rendered)
iam_instance_profile {
name = aws_iam_instance_profile.clients.name
}
tag_specifications {
resource_type = "instance"
tags = {
Name = "${local.name_prefix}-client"
# Tag matched by var.retry_join's cloud auto-join expression so
# Consul/Nomad agents can discover each other.
ConsulAutoJoin = "auto-join"
OwnerName = var.owner_name
OwnerEmail = var.owner_email
}
}
# Secondary 50 GiB gp2 volume on /dev/xvdd, deleted with the instance.
block_device_mappings {
device_name = "/dev/xvdd"
ebs {
volume_type = "gp2"
volume_size = "50"
delete_on_termination = "true"
}
}
}
# Auto scaling group for the Nomad clients, driven by the launch template
# above. Capacity bounds come from module variables; for the batch workers
# the external autoscaler adjusts desired capacity at runtime.
resource "aws_autoscaling_group" "clients" {
name_prefix = local.name_prefix
desired_capacity = var.desired_capacity
min_size = var.min_size
max_size = var.max_size
# NOTE: `load_balancers` attaches classic ELBs by name (not target groups).
load_balancers = local.load_balancers
vpc_zone_identifier = var.subnet_ids
launch_template {
id = aws_launch_template.clients.id
# "$Latest" makes new instances always use the newest template version.
version = "$Latest"
}
# Owner tags propagated onto every instance the ASG launches.
tag {
key = "OwnerName"
value = var.owner_name
propagate_at_launch = true
}
tag {
key = "OwnerEmail"
value = var.owner_email
propagate_at_launch = true
}
}

View File

@@ -0,0 +1,97 @@
# Required variables.
# Inputs for this Nomad clients module (no defaults — callers must set them).
variable "ami_id" {
type = string
description = "The AMI ID to use when launching new instances."
}
variable "key_name" {
type = string
description = "The SSH key name used to access instances."
}
variable "owner_name" {
type = string
description = "The name used to identify the owner of the resources provisioned by this module. It will be stored in a tag called OwnerName."
}
variable "owner_email" {
type = string
description = "The email used to contact the owner of the resources provisioned by this module. It will be stored in a tag called OwnerEmail."
}
variable "security_group_ids" {
  type = list(string)
  # Grammar fix: "associate the instances" -> "associate with the instances".
  description = "List of security group IDs to associate with the instances."
}
variable "subnet_ids" {
  type = list(string)
  description = "List of subnet IDs in which to launch instances."
}
# Optional variables.
variable "consul_binary_url" {
type = string
default = ""
# NOTE(review): the handling of an empty URL happens in provisioning
# scripts not visible here — presumably a default release is installed.
description = "The URL that points to the Consul binary to be used."
}
variable "desired_capacity" {
type = number
default = 1
description = "Number of initial instances to launch."
}
variable "instance_type" {
type = string
default = "t2.small"
description = "EC2 instance type to use for new instances."
}
variable "load_balancer_names" {
  type    = list(string)
  default = []
  # Typo fix in description: "useb" -> "used".
  description = "List of ELB names used by the ASG."
}
variable "max_size" {
type = number
default = 5
description = "Maximum number of instances allowed."
}
variable "min_size" {
type = number
default = 0
description = "Minimum number of instances allowed."
}
variable "nomad_datacenter" {
type = string
default = "dc1"
description = "Nomad datacenter where the clients will be registered."
}
variable "nomad_node_class" {
type = string
default = "hashistack"
# Also used (with stack_name) to build the launch template / ASG name prefix.
description = "Node class associated with the Nomad clients provisioned by this module."
}
variable "nomad_binary_url" {
type = string
default = ""
description = "The URL that points to the Nomad binary to be used."
}
variable "retry_join" {
type = string
# Matches the ConsulAutoJoin tag applied by the launch template, letting
# agents discover peers through AWS tag lookup.
default = "provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"
description = "Cloud auto-join expression by the Consul and Nomad agents."
}
variable "stack_name" {
type = string
default = "hashistack"
description = "Name used to identify resources provisioned by this module."
}

View File

@@ -0,0 +1,65 @@
locals {
# Build a fresh image only when the caller did not supply an AMI ID.
build_image = var.ami_id == ""
# Select whichever data source is active: exactly one of "built"/"existing"
# has count = 1, mirroring build_image.
image = local.build_image ? data.aws_ami.built[0] : data.aws_ami.existing[0]
image_id = local.image.id
# Snapshot ID backing one of the image's block device mappings ("" if the
# mapping has none). NOTE(review): block_device_mappings is a set, so [0]
# picks an arbitrary element — this assumes the image has a single EBS
# mapping; confirm.
snapshot_id = [for b in local.image.block_device_mappings : lookup(b.ebs, "snapshot_id", "")][0]
}
# Runs `packer build` on the machine executing Terraform, forwarding the
# module inputs as Packer -var flags. Only created when no AMI was supplied.
resource "null_resource" "packer_build" {
count = local.build_image ? 1 : 0
provisioner "local-exec" {
command = <<EOF
cd ${path.module}/../../../packer && \
packer build -force \
-var 'owner_name=${var.owner_name}' \
-var 'owner_email=${var.owner_email}' \
-var 'region=${var.region}' \
-var 'stack_name=${var.stack_name}' \
-var 'vpc_id=${var.vpc_id}' \
-var 'subnet_id=${var.subnet_id}' \
aws-packer.pkr.hcl
EOF
}
}
# Cleanup bookkeeping: persists "<image_id>,<snapshot_id>,<region>" to a
# local file so the destroy-time provisioner can recover these values via
# self.content (destroy provisioners may only reference self). On destroy,
# the built AMI is deregistered and its backing snapshot deleted.
resource "local_file" "cleanup" {
count = local.build_image ? 1 : 0
content = "${local.image_id},${local.snapshot_id},${var.region}"
filename = ".cleanup-${local.image_id}"
file_permission = "0644"
provisioner "local-exec" {
when = destroy
command = <<EOF
aws ec2 deregister-image --image-id ${split(",", self.content)[0]} --region ${split(",", self.content)[2]} &&
aws ec2 delete-snapshot --snapshot-id ${split(",", self.content)[1]} --region ${split(",", self.content)[2]}
EOF
}
}
# The AMI produced by the Packer build; depends_on forces the lookup to run
# only after the build has completed.
data "aws_ami" "built" {
depends_on = [null_resource.packer_build]
count = local.build_image ? 1 : 0
owners = ["self"]
most_recent = true
filter {
name = "name"
values = [var.stack_name]
}
}
# A pre-existing AMI supplied by the caller through var.ami_id.
data "aws_ami" "existing" {
count = local.build_image ? 0 : 1
owners = ["self"]
most_recent = true
filter {
name = "image-id"
values = [var.ami_id]
}
}

Some files were not shown because too many files have changed in this diff Show More