mirror of https://github.com/Loofort/terraform-aws-gitea synced 2024-11-22 02:32:02 +01:00

initial commit

illia 2018-12-16 15:12:43 +03:00
parent 5f42a9e81f
commit d61accfbdf
10 changed files with 346 additions and 1 deletion

4
.gitignore vendored Normal file

@@ -0,0 +1,4 @@
env.txt
.terraform/*
aws-init/terraform.tfstate
key.pem

6
README.md
@@ -1,2 +1,6 @@
# terraform-aws-gitea
terraform template for setting up gitea in aws
This Terraform template creates the AWS infrastructure for the Gitea source-control service.
1) The template uses a remote state backend, so the S3 bucket has to be initialized first - just run terraform-init.sh
2) Then apply Terraform as usual - terraform apply
3) Finally, navigate to your newly created Gitea service (see the terraform output for the DNS name), or SSH to the server - ./connect.sh. A full command sketch follows below.
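For reference, a minimal end-to-end run could look like the following sketch; the bucket name my-gitea-tfstate, the example region and the credential placeholders are illustrative values, not values taken from this repo:

export AWS_ACCESS_KEY_ID=...             # placeholder access key for an IAM user
export AWS_SECRET_ACCESS_KEY=...         # placeholder secret key
export AWS_DEFAULT_REGION=eu-west-1      # example region
./terraform-init.sh my-gitea-tfstate     # create the S3 bucket + DynamoDB lock table and configure the backend
terraform apply                          # build the VPC, security group and the EC2 host running Gitea
terraform output amazon_domain           # public DNS name of the server; the Gitea UI listens on port 3000
./connect.sh                             # optional: SSH into the instance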

49
aws-init/aws-init.tf Normal file

@@ -0,0 +1,49 @@
variable "name" {
description = "the name of infrastructure project"
}
variable "destroy" {
default = false
}
variable "tags" {
default = {
Purpose = "terraform state storage"
}
}
# access_key, secret_key, region are provided by env variables
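# (i.e. AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_DEFAULT_REGION are
# expected to be exported in the shell before running terraform-init.sh)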
provider "aws" {}
resource "aws_s3_bucket" "bucket" {
tags = "${var.tags}"
bucket = "${var.name}"
acl = "private"
versioning {
enabled = true
}
lifecycle {
prevent_destroy = true
}
force_destroy = "${var.destroy}"
}
resource "aws_dynamodb_table" "table" {
tags = "${var.tags}"
name = "${var.name}"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
attribute {
name = "LockID"
type = "S"
}
lifecycle {
prevent_destroy = true
}
}
############## outputs #################
output "bucket" {
value = "${aws_s3_bucket.bucket.bucket}"
}
output "table" {
value = "${aws_dynamodb_table.table.name}"
}

15
connect.sh Executable file

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -euxo pipefail
#ip=$(terraform output -json | jq -r '.public_ip.value')
#terraform output -json | jq -r '.private_key.value' > key.pem
ip=$(terraform state show aws_instance.host | grep "public_ip " | sed 's/public_ip *= //')
terraform state show tls_private_key.key | perl -ne 'BEGIN{undef $/;} /(-----BEGIN RSA PRIVATE.*?PRIVATE KEY-----)/s and print "$1"' > key.pem
chmod 400 key.pem
ssh -i key.pem -oStrictHostKeyChecking=no ec2-user@$ip
rm -f key.pem
# generate key manually
# ssh-keygen -t rsa -b 4096 -C "skarbdev@gmail.com" -f sshkey -q -N ""

39
docker-compose.yml Normal file

@@ -0,0 +1,39 @@
version: "2"
networks:
gitea:
external: false
services:
server:
image: gitea/gitea:latest
environment:
- USER_UID=1000
- USER_GID=1000
- DB_TYPE=postgres
- DB_HOST=db:5432
- DB_NAME=gitea
- DB_USER=gitea
- DB_PASSWD=gitea
restart: always
networks:
- gitea
volumes:
- ./gitea:/data
ports:
- "3000:3000"
- "222:22"
depends_on:
- db
db:
image: postgres:9.6
restart: always
environment:
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD=gitea
- POSTGRES_DB=gitea
networks:
- gitea
volumes:
- ./postgres:/var/lib/postgresql/data

115
main.tf Normal file

@@ -0,0 +1,115 @@
module "vpc" {
source = "./vpc"
cidr = "10.0.0.0/16"
cidr_subnet = "10.0.1.0/24"
tags = "${var.tags}"
}
data "aws_ami" "al2" {
owners = ["amazon"]
most_recent = true
name_regex = "^amzn2-ami-hvm-2.0.\\d+-x86_64-gp2$"
filter {
name = "name"
values = ["amzn2-ami-hvm-2.*"]
}
}
resource "tls_private_key" "key" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "aws_key_pair" "key" {
public_key = "${tls_private_key.key.public_key_openssh}"
}
resource "aws_instance" "host" {
ami = "${data.aws_ami.al2.id}"
instance_type = "t2.micro"
subnet_id = "${module.vpc.subnet}"
key_name = "${aws_key_pair.key.key_name}"
vpc_security_group_ids = ["${aws_security_group.web.id}"]
monitoring = false # basic monitoring every 5 min is free (instead of the paid 1-min detailed monitoring)
#iam_instance_profile = "${var.iam_instance_profile}"
user_data = <<HEREDOC
#!/bin/bash
sudo yum update -y
sudo amazon-linux-extras install docker
sudo service docker start
sudo usermod -a -G docker ec2-user
sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
HEREDOC
associate_public_ip_address = true
volume_tags = "${var.tags}"
tags = "${var.tags}"
#root_block_device = "${var.root_block_device}"
#ebs_block_device = "${var.ebs_block_device}"
#ephemeral_block_device = "${var.ephemeral_block_device}"
provisioner "remote-exec" {
inline = [
"cloud-init status --wait",
"mkdir ~/gitea",
]
}
provisioner "file" {
source = "docker-compose.yml"
destination = "~/gitea/docker-compose.yml"
}
provisioner "remote-exec" {
inline = [
"cd ~/gitea/",
"/usr/local/bin/docker-compose up -d",
]
}
connection {
type = "ssh"
user = "ec2-user"
private_key = "${tls_private_key.key.private_key_pem}"
}
}
# Security Group
resource "aws_security_group" "web" {
vpc_id = "${module.vpc.id}"
tags = "${var.tags}"
name = "Gitea Server"
description = "allows custom ssh and www"
ingress {
from_port = 3000
to_port = 3000
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
# git-over-SSH for Gitea (host port 222 is mapped to the container's SSH port 22 in docker-compose.yml)
from_port = 222
to_port = 222
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

19
meta.tf Normal file

@@ -0,0 +1,19 @@
################# inputs ################
variable "tags" {
description = "A map of tags to add to all resources"
default = {
Project = "gitea"
}
}
# The backend and provider are configured by env variables
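# (AWS credentials/region come from the usual AWS_* environment variables; the
# backend's bucket, key and dynamodb_table are passed via -backend-config flags
# by terraform-init.sh)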
provider "aws" {}
terraform {
backend "s3" {}
}
############## outputs #################
output "amazon_domain" {
value = "${aws_instance.host.public_dns}"
}

30
terraform-init.sh Executable file

@@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -euxo pipefail
if [ "$#" -ne 1 ]; then
echo "usage: $0 name"
echo "where name is the s3 bucket (and dynamodb table) to be created for tf-remote-state storage"
exit 1
fi
tfname=$1
tfstate=aws-init/terraform.tfstate
terraform init aws-init
# first, try to import remote storage
terraform import -state=$tfstate -backup=- -config aws-init aws_s3_bucket.bucket $tfname || true
terraform import -state=$tfstate -backup=- -config aws-init aws_dynamodb_table.table $tfname || true
# create remote storage
terraform apply -auto-approve -state=$tfstate -backup=- -var "name=$tfname" aws-init
# to destroy (firstly set: prevent_destroy = false) :
#terraform apply -auto-approve -state=$tfstate -backup=- -var "name=$tfname" -var "destroy=true" aws-init
#terraform destroy -auto-approve -state=$tfstate -backup=- -var "name=$tfname" aws-init
# init actual backend with proper remote storage
terraform init \
-backend-config="bucket=$tfname" \
-backend-config="key=gitea/terraform.tfstate" \
-backend-config="dynamodb_table=$tfname"

34
vpc/variables.tf Normal file

@@ -0,0 +1,34 @@
variable "cidr" {
description = "The CIDR block for the VPC. Default value is a valid CIDR, but not acceptable by AWS and should be overridden"
default = "0.0.0.0/0"
}
variable "cidr_subnet" {
description = "the cidr of the subnet"
#default = "0.0.0.0/0"
}
variable "tags" {
description = "A map of tags to add to all resources"
type = "map"
#default = {}
}
variable "az" {
description = "availability zones in the region"
default = ""
}
# A computed default
data "aws_availability_zones" "azs" {}
locals {
default_az = "${data.aws_availability_zones.azs.names[0]}"
az = "${var.az != "" ? var.az : local.default_az}"
}
# Outputs
output "subnet" {
value = "${aws_subnet.public.id}"
}
output "id" {
value = "${aws_vpc.this.id}"
}

36
vpc/vpc.tf Normal file

@@ -0,0 +1,36 @@
# VPC
resource "aws_vpc" "this" {
tags = "${var.tags}"
cidr_block = "${var.cidr}"
enable_dns_support = true
enable_dns_hostnames = true
}
# Internet Gateway
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.this.id}"
tags = "${var.tags}"
}
# Public route table
resource "aws_route_table" "public" {
vpc_id = "${aws_vpc.this.id}"
tags = "${var.tags}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
}
# Subnet
resource "aws_subnet" "public" {
vpc_id = "${aws_vpc.this.id}"
tags = "${var.tags}"
cidr_block = "${var.cidr_subnet}"
availability_zone = "${local.az}"
map_public_ip_on_launch = true
}
resource "aws_route_table_association" "public" {
subnet_id = "${aws_subnet.public.id}"
route_table_id = "${aws_route_table.public.id}"
}