diff --git a/.github/workflows/fmt_terraform.yml b/.github/workflows/fmt_terraform.yml
new file mode 100644
index 00000000..967511fe
--- /dev/null
+++ b/.github/workflows/fmt_terraform.yml
@@ -0,0 +1,48 @@
+name: Terraform Auto Format on Main
+
+on:
+  push:
+    branches:
+      - main
+
+permissions:
+  contents: write
+
+jobs:
+  terraform-fmt:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install Terraform
+        run: |
+          wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
+          echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" \
+            | sudo tee /etc/apt/sources.list.d/hashicorp.list
+          sudo apt update && sudo apt install terraform
+
+      - name: Terraform fmt
+        id: fmt
+        run: terraform fmt -recursive
+        continue-on-error: true
+
+      - name: Check for changes
+        id: git-check
+        run: |
+          if [[ -n $(git status --porcelain) ]]; then
+            echo "changes=true" >> $GITHUB_OUTPUT
+          else
+            echo "changes=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Commit and push changes
+        if: steps.git-check.outputs.changes == 'true'
+        run: |
+          git config user.name "GitHub Actions"
+          git config user.email "actions@github.com"
+          git add .
+          git commit -m "chore(terraform): auto-format on merge to main"
+          git push
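
The workflow above runs terraform fmt -recursive right after a push to main and commits whatever changed. The same rule can also be enforced before merge so that unformatted files never reach main in the first place. The following is not part of the change below; it is a minimal sketch under the assumption that the hashicorp/setup-terraform action is acceptable in place of the apt-based install used above:

name: Terraform fmt check
on:
  pull_request:
    paths:
      - "tf/**"
jobs:
  fmt-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3
      # fmt -check exits non-zero when any file under tf/ still needs formatting
      - run: terraform fmt -check -recursive -diff tf/

With such a check in place, the push-time job above would act mainly as a safety net.
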
diff --git a/tf/environments/dev/main.tf b/tf/environments/dev/main.tf
index 1aca9365..a11c5067 100644
--- a/tf/environments/dev/main.tf
+++ b/tf/environments/dev/main.tf
@@ -244,7 +244,7 @@ resource "random_id" "artifact_id" {
 }
 
 resource "aws_s3_bucket" "anoncred_manifests" {
-  bucket = "ooni-anoncreds-manifests-dev-${var.aws_region}"
+  bucket              = "ooni-anoncreds-manifests-dev-${var.aws_region}"
   object_lock_enabled = true
   versioning {
     enabled = true
@@ -313,7 +313,7 @@ resource "aws_s3_bucket_acl" "anonc_manifests" {
 # Stored here to be publicly available, verifiable, and version controlled
 resource "aws_s3_object" "manifest" {
   bucket  = aws_s3_bucket.anoncred_manifests.id
-  key = "manifest.json"
+  key     = "manifest.json"
   content = jsonencode({
     nym_scope = "ooni.org/{probe_cc}/{probe_asn}"
     submission_policy = {
@@ -326,7 +326,7 @@ resource "aws_s3_object" "manifest" {
 # Test manifest used for integration tests
 resource "aws_s3_object" "test_manifest" {
   bucket  = aws_s3_bucket.anoncred_manifests.id
-  key = "test_manifest.json"
+  key     = "test_manifest.json"
   content = jsonencode({
     nym_scope = "ooni.org/{probe_cc}/{probe_asn}"
     submission_policy = {
@@ -401,8 +401,8 @@ module "ooniapi_cluster" {
   vpc_id     = module.network.vpc_id
   subnet_ids = module.network.vpc_subnet_private[*].id
 
-  asg_min    = 2
-  asg_max    = 4
+  asg_min = 2
+  asg_max = 4
 
   instance_type = "t3a.micro"
 
@@ -429,8 +429,8 @@ module "oonitier1plus_cluster" {
   vpc_id     = module.network.vpc_id
   subnet_ids = module.network.vpc_subnet_private[*].id
 
-  asg_min    = 1
-  asg_max    = 4
+  asg_min = 1
+  asg_max = 4
 
   instance_type = "t3a.micro"
 
@@ -547,13 +547,13 @@ module "ooniapi_ooniprobe" {
     # module.ooniapi_cluster.web_security_group_id
   ]
 
-  use_autoscaling = true
+  use_autoscaling       = true
   service_desired_count = 1
-  max_desired_count = 4
+  max_desired_count     = 4
   autoscale_policies = [
     {
-      resource_type = "memory"
-      name = "memory"
+      resource_type     = "memory"
+      name              = "memory"
       scaleout_treshold = 60
     }
   ]
@@ -1070,13 +1070,13 @@ module "ooniapi_oonimeasurements" {
     module.oonitier1plus_cluster.web_security_group_id
   ]
 
-  use_autoscaling = true
+  use_autoscaling       = true
   service_desired_count = 1
-  max_desired_count = 8
+  max_desired_count     = 8
   autoscale_policies = [
     {
-      name = "memory"
-      resource_type = "memory"
+      name              = "memory"
+      resource_type     = "memory"
       scaleout_treshold = 60
     }
   ]
diff --git a/tf/environments/prod/main.tf b/tf/environments/prod/main.tf
index 718f6173..72fd2aeb 100644
--- a/tf/environments/prod/main.tf
+++ b/tf/environments/prod/main.tf
@@ -162,10 +162,10 @@ module "oonidevops_github_user" {
 module "oonipg" {
   source = "../../modules/postgresql"
 
-  name = "ooni-tier0-postgres"
-  aws_region = var.aws_region
-  vpc_id = module.network.vpc_id
-  subnet_ids = module.network.vpc_subnet_public[*].id
+  name       = "ooni-tier0-postgres"
+  aws_region = var.aws_region
+  vpc_id     = module.network.vpc_id
+  subnet_ids = module.network.vpc_subnet_public[*].id
 
   # By default, max_connections is computed as:
   # LEAST({DBInstanceClassMemory/9531392}, 5000)
@@ -262,6 +262,16 @@ resource "aws_secretsmanager_secret_version" "oonipg_url" {
   )
 }
 
+module "geoip_bucket" {
+  source = "../../modules/s3_bucket"
+
+  bucket_name         = "ooni-geoip-${var.aws_region}-private-${local.environment}"
+  public_read         = false
+  create_iam_user     = true
+  versioning_enabled  = false
+  object_lock_enabled = false
+}
+
 resource "random_id" "artifact_id" {
   byte_length = 4
 }
@@ -282,8 +292,9 @@ resource "aws_s3_bucket" "ooni_private_config_bucket" {
   bucket = "ooni-config-${var.aws_region}-${random_id.artifact_id.hex}"
 }
 
+
 resource "aws_s3_bucket" "anoncred_manifests" {
-  bucket = "ooni-anoncreds-manifests-${var.aws_region}"
+  bucket              = "ooni-anoncreds-manifests-${var.aws_region}"
   object_lock_enabled = true
   versioning {
     enabled = true
@@ -352,7 +363,7 @@ resource "aws_s3_bucket_acl" "anonc_manifests" {
 # Stored here to be publicly available, verifiable, and version controlled
 resource "aws_s3_object" "manifest" {
   bucket  = aws_s3_bucket.anoncred_manifests.id
-  key = "manifest.json"
+  key     = "manifest.json"
   content = jsonencode({
     nym_scope = "ooni.org/{probe_cc}/{probe_asn}"
     submission_policy = {
@@ -484,14 +495,14 @@ module "ooni_clickhouse_proxy" {
       protocol    = "tcp",
       cidr_blocks = ["0.0.0.0/0"],
       }, {
-      from_port = 9000,
-      to_port = 9000,
-      protocol = "tcp",
+      from_port   = 9000,
+      to_port     = 9000,
+      protocol    = "tcp",
       cidr_blocks = concat(
         module.network.vpc_subnet_public[*].cidr_block,
         module.network.vpc_subnet_private[*].cidr_block,
         ["${module.ooni_fastpath.aws_instance_private_ip}/32",
-        "${module.ooni_fastpath.aws_instance_public_ip}/32"]
+          "${module.ooni_fastpath.aws_instance_public_ip}/32"]
       ),
       }, {
       // For the prometheus proxy:
@@ -614,8 +625,8 @@ module "ooniapi_cluster" {
   subnet_ids = module.network.vpc_subnet_public[*].id
 
   # You need be careful how these are tweaked.
-  asg_min    = 2
-  asg_max    = 10
+  asg_min = 2
+  asg_max = 10
 
   instance_type = "t3a.medium"
 
@@ -642,8 +653,8 @@ module "oonitier1plus_cluster" {
   vpc_id     = module.network.vpc_id
   subnet_ids = module.network.vpc_subnet_private[*].id
 
-  asg_min    = 2
-  asg_max    = 5
+  asg_min = 2
+  asg_max = 5
 
   instance_type = "t3a.medium"
 
@@ -733,7 +744,7 @@ module "ooniapi_ooniprobe" {
   dns_zone_ooni_io = local.dns_zone_ooni_io
   key_name         = module.adm_iam_roles.oonidevops_key_name
   ecs_cluster_id   = module.ooniapi_cluster.cluster_id
-  task_memory = 256
+  task_memory      = 256
 
   task_secrets = {
 
@@ -758,13 +769,13 @@ module "ooniapi_ooniprobe" {
     module.ooniapi_cluster.web_security_group_id
   ]
 
-  use_autoscaling = true
+  use_autoscaling       = true
   service_desired_count = 2
-  max_desired_count = 8
+  max_desired_count     = 8
   autoscale_policies = [
     {
-      resource_type = "memory"
-      name = "memory"
+      resource_type     = "memory"
+      name              = "memory"
       scaleout_treshold = 60
     }
   ]
@@ -1092,13 +1103,13 @@ module "ooniapi_oonimeasurements" {
     module.ooniapi_cluster.web_security_group_id
   ]
 
-  use_autoscaling = true
+  use_autoscaling       = true
   service_desired_count = 4
-  max_desired_count = 32 # 8gb (total mem) / 256mb (mem per task) = 32 tasks
+  max_desired_count     = 32 # 8gb (total mem) / 256mb (mem per task) = 32 tasks
   autoscale_policies = [
     {
-      name = "memory"
-      resource_type = "memory"
+      name              = "memory"
+      resource_type     = "memory"
       scaleout_treshold = 60
     }
   ]
diff --git a/tf/modules/ansible_controller/main.tf b/tf/modules/ansible_controller/main.tf
index a2607570..96559cd6 100644
--- a/tf/modules/ansible_controller/main.tf
+++ b/tf/modules/ansible_controller/main.tf
@@ -12,9 +12,9 @@ resource "aws_security_group" "ansible_ctrl_sg" {
   }
 
   ingress {
-    protocol = "tcp"
-    from_port = 9100
-    to_port = 9100
+    protocol        = "tcp"
+    from_port       = 9100
+    to_port         = 9100
     security_groups = var.monitoring_sg_ids
   }
 
@@ -64,7 +64,7 @@ resource "aws_instance" "ansible_controller" {
 
   vpc_security_group_ids = [aws_security_group.ansible_ctrl_sg.id]
 
-  tags = merge({ Name = "ansible-controller", MonitoringActive = var.monitoring_active}, var.tags)
+  tags = merge({ Name = "ansible-controller", MonitoringActive = var.monitoring_active }, var.tags)
 }
 
 resource "aws_route53_record" "oonith_service_alias" {
diff --git a/tf/modules/ansible_controller/variables.tf b/tf/modules/ansible_controller/variables.tf
index eeaec922..7d7251e3 100644
--- a/tf/modules/ansible_controller/variables.tf
+++ b/tf/modules/ansible_controller/variables.tf
@@ -27,12 +27,12 @@ variable "dns_zone_ooni_io" {
 
 variable "monitoring_sg_ids" {
   description = "Ids of the security groups used for monitoring"
-  default = []
-  type = list(string)
+  default     = []
+  type        = list(string)
 }
 
 variable "monitoring_active" {
   description = "If the monitoring system should consider the ansible controller machine. Set it to 'true' to activate it, anything else to deactivate it"
-  default = "true"
-  type = string
+  default     = "true"
+  type        = string
 }
\ No newline at end of file
diff --git a/tf/modules/cloudhsm/main.tf b/tf/modules/cloudhsm/main.tf
index beeacf1d..c6f12abb 100644
--- a/tf/modules/cloudhsm/main.tf
+++ b/tf/modules/cloudhsm/main.tf
@@ -58,7 +58,7 @@ resource "aws_instance" "codesign_box" {
     rm cloudhsm-pkcs11.rpm
   EOF
 
-  tags = merge(var.tags, { Name = "codesign-box" , MonitoringActive = var.monitoring_active})
+  tags = merge(var.tags, { Name = "codesign-box", MonitoringActive = var.monitoring_active })
 
   // NOTE: remove the ignore_changes rule to deploy
   lifecycle {
@@ -87,7 +87,7 @@ resource "aws_launch_template" "codesign_box_template" {
     resource_type = "instance"
 
     tags = {
-      Name = "codesign-box"
+      Name             = "codesign-box"
       MonitoringActive = var.monitoring_active
     }
   }
diff --git a/tf/modules/cloudhsm/variables.tf b/tf/modules/cloudhsm/variables.tf
index 3a77b5dd..27a905c6 100644
--- a/tf/modules/cloudhsm/variables.tf
+++ b/tf/modules/cloudhsm/variables.tf
@@ -29,6 +29,6 @@ variable "tags" {
 
 variable "monitoring_active" {
   description = "If the monitoring system should consider the HSM machine. Set it to 'true' to activate it, anything else to deactivate it"
-  default = "true"
-  type = string
+  default     = "true"
+  type        = string
 }
\ No newline at end of file
diff --git a/tf/modules/ec2/main.tf b/tf/modules/ec2/main.tf
index b73bdec6..25ada865 100644
--- a/tf/modules/ec2/main.tf
+++ b/tf/modules/ec2/main.tf
@@ -20,25 +20,25 @@ resource "aws_security_group" "ec2_sg" {
 resource "aws_security_group_rule" "ec2_sg_ingress" {
   count = length(var.ingress_rules)
 
-  type = "ingress"
+  type              = "ingress"
   from_port         = var.ingress_rules[count.index].from_port
   to_port           = var.ingress_rules[count.index].to_port
   protocol          = var.ingress_rules[count.index].protocol
   cidr_blocks       = var.ingress_rules[count.index].cidr_blocks
   ipv6_cidr_blocks  = var.ingress_rules[count.index].ipv6_cidr_blocks
-  security_group_id  = aws_security_group.ec2_sg.id
+  security_group_id = aws_security_group.ec2_sg.id
 }
 
 resource "aws_security_group_rule" "ec2_sg_egress" {
   count = length(var.egress_rules)
 
-  type = "egress"
+  type              = "egress"
   from_port         = var.egress_rules[count.index].from_port
   to_port           = var.egress_rules[count.index].to_port
   protocol          = var.egress_rules[count.index].protocol
   cidr_blocks       = var.egress_rules[count.index].cidr_blocks
   ipv6_cidr_blocks  = var.egress_rules[count.index].ipv6_cidr_blocks
-  security_group_id  = aws_security_group.ec2_sg.id
+  security_group_id = aws_security_group.ec2_sg.id
 }
 
 data "cloudinit_config" "ooni_ec2" {
@@ -47,7 +47,7 @@ data "cloudinit_config" "ooni_ec2" {
 
   part {
     filename     = "init.cfg"
     content_type = "text/cloud-config"
-    content = templatefile("${path.module}/templates/cloud-init.yml", {})
+    content      = templatefile("${path.module}/templates/cloud-init.yml", {})
   }
 }
 
@@ -87,15 +87,15 @@ resource "aws_instance" "ooni_ec2" {
 
   lifecycle {
     create_before_destroy = true
-    ignore_changes = [ user_data, launch_template ]
+    ignore_changes         = [user_data, launch_template]
   }
 
   root_block_device {
-    volume_size = var.disk_size  # Size in GB
+    volume_size = var.disk_size # Size in GB
     volume_type = "gp2"
   }
 
-  tags = merge(var.tags, {MonitoringActive = var.monitoring_active})
+  tags = merge(var.tags, { MonitoringActive = var.monitoring_active })
 }
 
 resource "aws_alb_target_group" "ooni_ec2" {
diff --git a/tf/modules/ec2/outputs.tf b/tf/modules/ec2/outputs.tf
index 3acd1293..a09c3362 100644
--- a/tf/modules/ec2/outputs.tf
+++ b/tf/modules/ec2/outputs.tf
@@ -3,7 +3,7 @@ output "aws_instance_id" {
 }
 
 output "aws_instance_public_dns" {
-  value = aws_instance.ooni_ec2.public_dns 
+  value = aws_instance.ooni_ec2.public_dns
 }
 
 output "ec2_sg_id" {
diff --git a/tf/modules/ec2/variables.tf b/tf/modules/ec2/variables.tf
index f8b30d78..d6a30e0f 100644
--- a/tf/modules/ec2/variables.tf
+++ b/tf/modules/ec2/variables.tf
@@ -10,7 +10,7 @@ variable "private_subnet_cidr" {
   description = "the cidr block of the private subnet to allow traffic from for the clickhouse proxy"
 }
 
- variable "tags" {
+variable "tags" {
   description = "tags to apply to the resources"
   default     = {}
   type        = map(string)
@@ -37,41 +37,41 @@ variable "dns_zone_ooni_io" {
 }
 
 variable "sg_prefix" {
-    description = "security group prefix"
+  description = "security group prefix"
 }
 
 variable "ingress_rules" {
   type = list(object({
-      from_port = number
-      to_port = number
-      protocol = string
-      cidr_blocks = list(string)
-      ipv6_cidr_blocks = optional(list(string))
-    }))
+    from_port        = number
+    to_port          = number
+    protocol         = string
+    cidr_blocks      = list(string)
+    ipv6_cidr_blocks = optional(list(string))
+  }))
 }
 
 variable "egress_rules" {
   type = list(object({
-      from_port = number
-      to_port = number
-      protocol = string
-      cidr_blocks = optional(list(string))
-      ipv6_cidr_blocks = optional(list(string))
-    }))
+    from_port        = number
+    to_port          = number
+    protocol         = string
+    cidr_blocks      = optional(list(string))
+    ipv6_cidr_blocks = optional(list(string))
+  }))
 }
 
 variable "tg_prefix" {
-    description = "target group prefix. Will be prefixed with `oo`, example: bkprx -> oobkprx"
+  description = "target group prefix. Will be prefixed with `oo`, example: bkprx -> oobkprx"
 }
 
 variable "monitoring_active" {
   description = "If the monitoring system should consider this machine. Set it to 'true' to activate it, anything else to deactivate it"
-  default = "true"
-  type = string
+  default     = "true"
+  type        = string
 }
 
 variable "disk_size" {
   description = "Available disk space for this machine, in GB. Defaults to 8gb"
-  default = 8
-  type = number
+  default     = 8
+  type        = number
 }
\ No newline at end of file
diff --git a/tf/modules/ecs_cluster/main.tf b/tf/modules/ecs_cluster/main.tf
index b95fb7ea..2fde9871 100644
--- a/tf/modules/ecs_cluster/main.tf
+++ b/tf/modules/ecs_cluster/main.tf
@@ -117,14 +117,14 @@ resource "aws_security_group" "container_host" {
 
     security_groups = concat([
       aws_security_group.web.id,
-    ],
+      ],
       var.monitoring_sg_ids)
   }
 
   ingress {
-    protocol = "tcp"
+    protocol        = "tcp"
     from_port       = 9100
-    to_port = 9100
+    to_port         = 9100
     security_groups = var.monitoring_sg_ids
   }
 
@@ -152,8 +152,8 @@ resource "aws_launch_template" "container_host" {
   instance_type = var.instance_type
 
   user_data = base64encode(templatefile("${path.module}/templates/ecs-setup.sh", {
-    ecs_cluster_name = var.name,
-    ecs_cluster_tags = var.tags
+    ecs_cluster_name   = var.name,
+    ecs_cluster_tags   = var.tags
     node_exporter_port = var.node_exporter_port
   }))
 
@@ -228,17 +228,17 @@ resource "aws_ecs_capacity_provider" "capacity_provider" {
   name = "${var.name}-capacity-provider"
 
   auto_scaling_group_provider {
-   auto_scaling_group_arn = aws_autoscaling_group.container_host.arn
-   managed_termination_protection = "ENABLED"
-   # managed_draining = "ENABLED"
-
-   managed_scaling {
-     maximum_scaling_step_size = 1000
-     minimum_scaling_step_size = 1
-     status = "ENABLED"
-     target_capacity = 100
-   }
+    auto_scaling_group_arn         = aws_autoscaling_group.container_host.arn
+    managed_termination_protection = "ENABLED"
+    # managed_draining = "ENABLED"
+
+    managed_scaling {
+      maximum_scaling_step_size = 1000
+      minimum_scaling_step_size = 1
+      status                    = "ENABLED"
+      target_capacity           = 100
+    }
+  }
 }
 
 // You also need to link the capacity provider to the cluster
diff --git a/tf/modules/ecs_cluster/variables.tf b/tf/modules/ecs_cluster/variables.tf
index a9669ebf..69fc483c 100644
--- a/tf/modules/ecs_cluster/variables.tf
+++ b/tf/modules/ecs_cluster/variables.tf
@@ -59,7 +59,7 @@ variable "instance_volume_size" {
 
 variable "monitoring_sg_ids" {
   default = []
-  type = list(string)
+  type    = list(string)
 }
 
 variable "node_exporter_port" {
@@ -68,6 +68,6 @@ variable "node_exporter_port" {
 
 variable "monitoring_active" {
   description = "If the monitoring system should consider cluster machines. Set it to 'true' to activate it, anything else to deactivate it"
-  default = "true"
-  type = string
+  default     = "true"
+  type        = string
 }
diff --git a/tf/modules/network/main.tf b/tf/modules/network/main.tf
index f224fda2..4a67bd69 100644
--- a/tf/modules/network/main.tf
+++ b/tf/modules/network/main.tf
@@ -7,7 +7,7 @@ resource "aws_vpc" "main" {
   cidr_block           = var.vpc_main_cidr_block
   enable_dns_hostnames = true
   enable_dns_support   = true
-  
+
   assign_generated_ipv6_cidr_block = true
 
   tags = var.tags
@@ -16,8 +16,8 @@ resource "aws_vpc" "main" {
 resource "aws_subnet" "public" {
   count = var.az_count
 
-  cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)
-  ipv6_cidr_block = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, count.index)
+  cidr_block        = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)
+  ipv6_cidr_block   = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, count.index)
   availability_zone = element(var.aws_availability_zones_available.names, count.index)
   vpc_id            = aws_vpc.main.id
 
@@ -39,7 +39,7 @@ resource "aws_subnet" "private" {
 
   cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, local.private_net_offset + count.index)
 
-  ipv6_cidr_block = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, local.private_net_offset + count.index)
+  ipv6_cidr_block   = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, local.private_net_offset + count.index)
   availability_zone = element(var.aws_availability_zones_available.names, count.index)
   vpc_id            = aws_vpc.main.id
diff --git a/tf/modules/ooni_backendproxy/main.tf b/tf/modules/ooni_backendproxy/main.tf
index a5674a60..7095a751 100644
--- a/tf/modules/ooni_backendproxy/main.tf
+++ b/tf/modules/ooni_backendproxy/main.tf
@@ -11,9 +11,9 @@ resource "aws_security_group" "nginx_sg" {
   vpc_id = var.vpc_id
 
   ingress {
-    protocol = "tcp"
-    from_port = 9000
-    to_port = 9000
+    protocol  = "tcp"
+    from_port = 9000
+    to_port   = 9000
   }
 
   ingress {
diff --git a/tf/modules/ooni_backendproxy/variables.tf b/tf/modules/ooni_backendproxy/variables.tf
index 870b547c..9d71bd06 100644
--- a/tf/modules/ooni_backendproxy/variables.tf
+++ b/tf/modules/ooni_backendproxy/variables.tf
@@ -10,7 +10,7 @@ variable "private_subnet_cidr" {
   description = "the cidr block of the private subnet to allow traffic from for the clickhouse proxy"
 }
 
- variable "tags" {
+variable "tags" {
   description = "tags to apply to the resources"
   default     = {}
   type        = map(string)
@@ -56,7 +56,7 @@ variable "dns_zone_ooni_io" {
 
 variable "clickhouse_url" {
   description = "clickhouse url to proxy requests to"
-  default = "backend-fsn.ooni.org"
+  default     = "backend-fsn.ooni.org"
 }
 
 variable "clickhouse_port" {
diff --git a/tf/modules/ooni_docker_build/main.tf b/tf/modules/ooni_docker_build/main.tf
index 8beb5ad7..b2c3d741 100644
--- a/tf/modules/ooni_docker_build/main.tf
+++ b/tf/modules/ooni_docker_build/main.tf
@@ -206,7 +206,7 @@ resource "aws_codepipeline" "oonidkr" {
 
     git_configuration {
       source_action_name = "Source"
-      
+
       push {
         branches {
           includes = [var.branch_name]
diff --git a/tf/modules/ooni_monitoring/main.tf b/tf/modules/ooni_monitoring/main.tf
index b4de25dc..888f0e13 100644
--- a/tf/modules/ooni_monitoring/main.tf
+++ b/tf/modules/ooni_monitoring/main.tf
@@ -2,7 +2,7 @@ locals {
   name = "ecs-service-discovery-${var.environment}"
 
   tags = {
-    Name = local.name
+    Name        = local.name
     Environment = var.environment
   }
 }
@@ -42,13 +42,13 @@ resource "aws_iam_access_key" "ooni_monitoring" {
 }
 
 resource "aws_ssm_parameter" "ooni_monitoring_access_key" {
-  name = "/oonidevops/secrets/ooni_monitoring/access_key"
-  type = "SecureString"
+  name  = "/oonidevops/secrets/ooni_monitoring/access_key"
+  type  = "SecureString"
   value = aws_iam_access_key.ooni_monitoring.id
 }
 
 resource "aws_ssm_parameter" "ooni_monitoring_secret_key" {
-  name = "/oonidevops/secrets/ooni_monitoring/secret_key"
-  type = "SecureString"
+  name  = "/oonidevops/secrets/ooni_monitoring/secret_key"
+  type  = "SecureString"
   value = aws_iam_access_key.ooni_monitoring.secret
 }
diff --git a/tf/modules/ooni_monitoring/variables.tf b/tf/modules/ooni_monitoring/variables.tf
index f49676a0..eba1b7a9 100644
--- a/tf/modules/ooni_monitoring/variables.tf
+++ b/tf/modules/ooni_monitoring/variables.tf
@@ -10,11 +10,11 @@ variable "environment" {
 
 variable "task_memory" {
   description = "How much memory to allocate for this task"
-  type = number
-  default = 64
+  type        = number
+  default     = 64
 }
 
 variable "aws_region" {
   description = "AWS region"
-  type = string
+  type        = string
 }
diff --git a/tf/modules/ooni_th_droplet/main.tf b/tf/modules/ooni_th_droplet/main.tf
index 9836ac62..74413a6f 100644
--- a/tf/modules/ooni_th_droplet/main.tf
+++ b/tf/modules/ooni_th_droplet/main.tf
@@ -34,7 +34,7 @@ resource "digitalocean_droplet" "ooni_th_docker" {
 
   lifecycle {
     create_before_destroy = true
-    ignore_changes = all
+    ignore_changes         = all
   }
 }
 resource "aws_route53_record" "ooni_th" {
diff --git a/tf/modules/ooniapi_frontend/main.tf b/tf/modules/ooniapi_frontend/main.tf
index 93de70fc..7f65233c 100644
--- a/tf/modules/ooniapi_frontend/main.tf
+++ b/tf/modules/ooniapi_frontend/main.tf
@@ -67,12 +67,12 @@ resource "aws_s3_bucket_policy" "alb_logs_policy" {
     Version = "2012-10-17"
     Statement = [
       {
-        Sid = "AWSLoadBalancerLogging"
-        Effect = "Allow"
+        Sid       = "AWSLoadBalancerLogging"
+        Effect    = "Allow"
         Principal = {
           AWS = "arn:aws:iam::${var.region_to_account_id[var.aws_region]}:root"
         }
-        Action = "s3:PutObject"
+        Action   = "s3:PutObject"
         Resource = "${aws_s3_bucket.load_balancer_logs.arn}/*"
       }
     ]
@@ -107,8 +107,8 @@ resource "aws_athena_database" "load_balancer_logs" {
 }
 
 resource "aws_athena_named_query" "create_alb_logs_table" {
-  name = "create_alb_logs_table"
-  database = aws_athena_database.load_balancer_logs.name
+  name     = "create_alb_logs_table"
+  database = aws_athena_database.load_balancer_logs.name
   query    = <