Friday, March 24, 2023

Terraform code for creating a new VPC with a secondary CIDR (for Kubernetes, for example)

 

I packaged this as a module. It creates a VPC with 3 public and 3 private subnets, an IGW, one NAT gateway per AZ, and a separate route table for each private subnet. It also creates a VPC flow log and a KMS key for encryption, otherwise it wouldn't pass security checks :)


Put the code below in place as your module, and change the "source" line to wherever you put main.tf, inputs.tf and outputs.tf.

This was written around Terraform version 1.3.
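
If you want to pin that, a version constraint along these lines works (a sketch; adjust the constraint to taste):

terraform {
  required_version = ">= 1.3"
}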


#terraform plan ; terraform apply 

module "vpc-3az-plus-secondary" {

    source="../modules/platform/vpc-3az-secondary-cidr/"
    EnvironmentName="whatever-you-need-dev-stage-or-prod"
    VpcRegion="us-east-1"
    VpcCIDR="22.125.106.0/24"
    SecondaryVpcCIDR="100.64.0.0/20"
    PublicSubnet1CIDR = "22.125.106.192/28"
    PublicSubnet2CIDR =  "22.125.106.208/28"
    PublicSubnet3CIDR =   "22.125.106.224/28"
    PrivateSubnet1CIDR =  "22.125.106.0/26"
    PrivateSubnet2CIDR =   "22.125.106.64/26"
    PrivateSubnet3CIDR =  "22.125.106.128/26"
    SecondaryPrivateSubnet1CIDR = "100.64.0.0/23"
    SecondaryPrivateSubnet2CIDR = "100.64.2.0/23"
    SecondaryPrivateSubnet3CIDR = "100.64.4.0/23"
}
Here are the files:
main.tf:
data "aws_availability_zones" "AZ" {
  state = "available"
}
resource "random_uuid" ""bm2023 {
}
resource "aws_vpc" "main" {
  cidr_block = var.VpcCIDR
  tags = {
    Name = var.EnvironmentName
  }
}
resource "aws_default_security_group" "default" {
  vpc_id = aws_vpc.main.id
}
resource "aws_flow_log" "flowlogs" {
  iam_role_arn    = aws_iam_role.vpcflowlogrole.arn
  log_destination = aws_cloudwatch_log_group.vpcflowloggroup.arn
  traffic_type    = "ALL"
  vpc_id          = aws_vpc.main.id
}
data "aws_caller_identity" "current" {}
locals {
  account_id     = data.aws_caller_identity.current.account_id
}
resource "aws_cloudwatch_log_group" "vpcflowloggroup" {
  name = "vpcflowlog-${var.EnvironmentName}-${random_uuid.bm2023.result}"
  retention_in_days = 14
  depends_on = [
    aws_kms_key.kmskey_cw_forloggroup]
  kms_key_id= aws_kms_key.kmskey_cw_forloggroup.arn

}
resource "aws_kms_key" "kmskey_cw_forloggroup" {
  description             = "KMS key for ${var.EnvironmentName} cloudwatch log group"
  enable_key_rotation     = true
  deletion_window_in_days = 10
   tags = {
    Name = "${var.EnvironmentName}-cloudwatch-loggroup"
  }
  policy = <<EOF
{
    "Id": "key-consolepolicy-3",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Enable IAM User Permissions",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::${local.account_id}:root"
            },
            "Action": "kms:*",
            "Resource": "*"
        },
        {
            "Sid": "Enable CloudWatch log group access",
            "Effect": "Allow",
            "Principal": {
                "Service": "logs.${var.VpcRegion}.amazonaws.com"
            },
            "Action": [
                "kms:Encrypt*",
                "kms:Decrypt*",
                "kms:ReEncrypt*",
                "kms:GenerateDataKey*",
                "kms:Describe*"
            ],
            "Resource": "*",
            "Condition": {
                "ArnEquals": {
                    "kms:EncryptionContext:aws:logs:arn": "arn:aws:logs:${var.VpcRegion}:${local.account_id}:log-group:*"
                }
            }
        }
    ]
}
EOF
}
resource "aws_iam_role" "vpcflowlogrole" {
  name = "vpcflowlogrole-${var.EnvironmentName}"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Allow",
      "Principal": {
        "Service": "vpc-flow-logs.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "vpcflowlogrolepolicy" {
  name = "vpcflowlogrole-${var.EnvironmentName}"
  role = aws_iam_role.vpcflowlogrole.id

  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:PutLogEvents",
        "logs:DescribeLogGroups",
        "logs:DescribeLogStreams"
      ],
      "Effect": "Allow",
      "Resource": "${aws_cloudwatch_log_group.vpcflowloggroup.arn}"
    }
  ]
}
EOF
}

resource "aws_internet_gateway" "igw" {
 vpc_id = aws_vpc.main.id

 tags = {
   Name = var.EnvironmentName
 }
}
resource "aws_subnet" "PublicSubnet1" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,0)
  map_public_ip_on_launch = false
  cidr_block = var.PublicSubnet1CIDR
   tags = {
   Name = "${var.EnvironmentName} Public Subnet (AZ1)"
   subnet_type = "public"
 }
}
resource "aws_subnet" "PublicSubnet2" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,1)
  map_public_ip_on_launch = false
  cidr_block = var.PublicSubnet2CIDR
     tags = {
   Name = "${var.EnvironmentName} Public Subnet (AZ2)"
    subnet_type = "public"
 }
}
resource "aws_subnet" "PublicSubnet3" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,2)
  map_public_ip_on_launch = false
  cidr_block = var.PublicSubnet3CIDR
     tags = {
   Name = "${var.EnvironmentName} Public Subnet (AZ3)"
    subnet_type = "public"
 }
}
resource "aws_subnet" "PrivateSubnet1" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,0)
  cidr_block = var.PrivateSubnet1CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ1)"
    subnet_type = "private"
 }
}
resource "aws_subnet" "PrivateSubnet2" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,1)
  cidr_block = var.PrivateSubnet2CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ2)"
   subnet_type = "private"
 }
}
resource "aws_subnet" "PrivateSubnet3" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,2)
  cidr_block = var.PrivateSubnet3CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ3)"
   subnet_type = "private"
 }
}
resource "aws_vpc_ipv4_cidr_block_association" "secondary_cidr" {
  vpc_id     = aws_vpc.main.id
  cidr_block = var.SecondaryVpcCIDR
}
resource "aws_subnet" "SecondaryPrivateSubnet1" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,0)
  cidr_block = var.SecondaryPrivateSubnet1CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ1) Secondary"
 }
}
resource "aws_subnet" "SecondaryPrivateSubnet2" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,1)
  cidr_block = var.SecondaryPrivateSubnet2CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ2) Secondary"
 }
}
resource "aws_subnet" "SecondaryPrivateSubnet3" {
  vpc_id = aws_vpc.main.id
  availability_zone = element(data.aws_availability_zones.AZ.names,2)
  cidr_block = var.SecondaryPrivateSubnet3CIDR
   tags = {
   Name = "${var.EnvironmentName} Private Subnet (AZ3) Secondary"
 }
}

resource "aws_eip" "NatGateway1EIP" {
  vpc        = true
  depends_on = [aws_internet_gateway.igw]
}
resource "aws_eip" "NatGateway2EIP" {
  vpc        = true
  depends_on = [aws_internet_gateway.igw]
}
resource "aws_eip" "NatGateway3EIP" {
  vpc        = true
  depends_on = [aws_internet_gateway.igw]
}
resource "aws_nat_gateway" "NatGateway1" {
  allocation_id = aws_eip.NatGateway1EIP.id
  subnet_id     = aws_subnet.PublicSubnet1.id
  depends_on    = [aws_internet_gateway.igw]
  tags = {
    Name = var.EnvironmentName
  }
}
resource "aws_nat_gateway" "NatGateway2" {
  allocation_id = aws_eip.NatGateway2EIP.id
  subnet_id     = aws_subnet.PublicSubnet2.id
  depends_on    = [aws_internet_gateway.igw]
  tags = {
    Name = var.EnvironmentName
  }
}
resource "aws_nat_gateway" "NatGateway3" {
  allocation_id = aws_eip.NatGateway3EIP.id
  subnet_id     = aws_subnet.PublicSubnet3.id
  depends_on    = [aws_internet_gateway.igw]
  tags = {
    Name = var.EnvironmentName
  }
}
resource "aws_route_table" "PublicRouteTable" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Public Routes"
  }
}
resource "aws_route_table" "PrivateRouteTable1" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ1)"
  }
}

resource "aws_route_table" "PrivateRouteTable2" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ2)"
  }
}

resource "aws_route_table" "PrivateRouteTable3" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ3)"
  }
}
resource "aws_route_table" "SecondaryPrivateRouteTable1" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ1)"
  }
}

resource "aws_route_table" "SecondaryPrivateRouteTable2" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ2)"
  }
}

resource "aws_route_table" "SecondaryPrivateRouteTable3" {
  vpc_id = aws_vpc.main.id
   tags = {
    Name = "${var.EnvironmentName} Private Routes (AZ3)"
  }
}
resource "aws_route" "DefaultPublicRoute" {
  route_table_id = aws_route_table.PublicRouteTable.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id = aws_internet_gateway.igw.id
  depends_on    = [aws_internet_gateway.igw]
}
resource "aws_route_table_association" "PublicSubnet1RouteTableAssociation" {
  route_table_id = aws_route_table.PublicRouteTable.id
  subnet_id = aws_subnet.PublicSubnet1.id
}
resource "aws_route_table_association" "PublicSubnet2RouteTableAssociation" {
  route_table_id = aws_route_table.PublicRouteTable.id
  subnet_id = aws_subnet.PublicSubnet2.id
}
resource "aws_route_table_association" "PublicSubnet3RouteTableAssociation" {
  route_table_id = aws_route_table.PublicRouteTable.id
  subnet_id = aws_subnet.PublicSubnet3.id
}
resource "aws_route" "DefaultPrivateRoute1" {
  route_table_id         = aws_route_table.PrivateRouteTable1.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway1.id
}
resource "aws_route" "DefaultPrivateRoute2" {
  route_table_id         = aws_route_table.PrivateRouteTable2.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway2.id
}
resource "aws_route" "DefaultPrivateRoute3" {
  route_table_id         = aws_route_table.PrivateRouteTable3.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway3.id
}

resource "aws_route" "SecondaryDefaultPrivateRoute1" {
  route_table_id         = aws_route_table.SecondaryPrivateRouteTable1.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway1.id
}
resource "aws_route" "SecondaryDefaultPrivateRoute2" {
  route_table_id         = aws_route_table.SecondaryPrivateRouteTable2.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway2.id
}
resource "aws_route" "SecondaryDefaultPrivateRoute3" {
  route_table_id         = aws_route_table.SecondaryPrivateRouteTable3.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.NatGateway3.id
}
resource "aws_route_table_association" "PrivateSubnet1RouteTableAssociation" {
  route_table_id = aws_route_table.PrivateRouteTable1.id
  subnet_id = aws_subnet.PrivateSubnet1.id
}
resource "aws_route_table_association" "PrivateSubnet2RouteTableAssociation" {
  route_table_id = aws_route_table.PrivateRouteTable2.id
  subnet_id = aws_subnet.PrivateSubnet2.id
}
resource "aws_route_table_association" "PrivateSubnet3RouteTableAssociation" {
  route_table_id = aws_route_table.PrivateRouteTable3.id
  subnet_id = aws_subnet.PrivateSubnet3.id
}
resource "aws_route_table_association" "SecondaryPrivateSubnet1RouteTableAssociation" {
  route_table_id = aws_route_table.SecondaryPrivateRouteTable1.id
  subnet_id = aws_subnet.SecondaryPrivateSubnet1.id
}
resource "aws_route_table_association" "SecondaryPrivateSubnet2RouteTableAssociation" {
  route_table_id = aws_route_table.SecondaryPrivateRouteTable2.id
  subnet_id = aws_subnet.SecondaryPrivateSubnet2.id
}
resource "aws_route_table_association" "SecondaryPrivateSubnet3RouteTableAssociation" {
  route_table_id = aws_route_table.SecondaryPrivateRouteTable3.id
  subnet_id = aws_subnet.SecondaryPrivateSubnet3.id
}
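
inputs.tf is just the variable declarations; a minimal version, inferred from what main.tf and the module call reference, would be:

inputs.tf:
variable "EnvironmentName"             { type = string }
variable "VpcRegion"                   { type = string }
variable "VpcCIDR"                     { type = string }
variable "SecondaryVpcCIDR"            { type = string }
variable "PublicSubnet1CIDR"           { type = string }
variable "PublicSubnet2CIDR"           { type = string }
variable "PublicSubnet3CIDR"           { type = string }
variable "PrivateSubnet1CIDR"          { type = string }
variable "PrivateSubnet2CIDR"          { type = string }
variable "PrivateSubnet3CIDR"          { type = string }
variable "SecondaryPrivateSubnet1CIDR" { type = string }
variable "SecondaryPrivateSubnet2CIDR" { type = string }
variable "SecondaryPrivateSubnet3CIDR" { type = string }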

outputs.tf:
output "EnvironmentName" {
  value = var.EnvironmentName
}
output "vpcId" {
    value = aws_vpc.main.id
}
output "vpc_cidr" {
    value = var.VpcCIDR
}
output "aws_cloudwatch_log_group" {
    value = aws_cloudwatch_log_group.vpcflowloggroup
}
output "kms_key" {
    value = aws_kms_key.kmskey_cw_forloggroup
}
output "aws_flow_log" {
    value = aws_flow_log.flowlogs
}
output "Secondary_vpc_cidr" {
    value = var.SecondaryVpcCIDR
}
output "PublicSubnet1" {
  value = aws_subnet.PublicSubnet1.id
}
output "PublicSubnet2" {
  value = aws_subnet.PublicSubnet2.id
}
output "PublicSubnet3" {
  value = aws_subnet.PublicSubnet3.id
}
output "public_subnet_route_table_id" {
    value = aws_route_table.PublicRouteTable.id
}
output "PrivateSubnet1" {
    value = aws_subnet.PrivateSubnet1.id
}
output "private_subnet1_route_table_id" {
    value = aws_route_table.PrivateRouteTable1.id
}
output "NatGatewayip_public_cidr1" {
    value = aws_nat_gateway.NatGateway1.id

}
output "PrivateSubnet2" {
    value = aws_subnet.PrivateSubnet2.id
}
output "private_subnet2_route_table_id" {
    value = aws_route_table.PrivateRouteTable2.id
}
output "NatGatewayip_public_cidr2" {
    value = aws_nat_gateway.NatGateway2.id
}
output "PrivateSubnet3" {
    value = aws_subnet.PrivateSubnet3.id
}
output "private_subnet3_route_table_id" {
    value = aws_route_table.PrivateRouteTable3.id
}
output "NatGatewayip_public_cidr3" {
    value = aws_nat_gateway.NatGateway3.id
}
output "SecondaryPrivateSubnet1" {
  value = aws_subnet.SecondaryPrivateSubnet1.id
}
output "SecondaryPrivateSubnet2" {
  value = aws_subnet.SecondaryPrivateSubnet2.id
}
output "SecondaryPrivateSubnet3" {
  value = aws_subnet.SecondaryPrivateSubnet3.id
}
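
Elsewhere in the root module you can consume these outputs, for example (a hypothetical snippet; collect whatever you need):

# hypothetical consumer of the module outputs, e.g. to hand the
# secondary subnets to an EKS cluster or anything else that needs them
locals {
  secondary_subnet_ids = [
    module.vpc-3az-plus-secondary.SecondaryPrivateSubnet1,
    module.vpc-3az-plus-secondary.SecondaryPrivateSubnet2,
    module.vpc-3az-plus-secondary.SecondaryPrivateSubnet3,
  ]
}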

*** This assumes you have a pipeline that has something along the following lines in the Git repo directory for that pipeline:

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }
}

provider "aws" {
  region = "us-east-1"
  # Role in remote account:
  assume_role {
    role_arn = "arn:aws:iam::00000000000:role/yourrole-terraform-automation"
  }
  default_tags {
    tags = {
      automation_provider       = "codepipeline"
      automation_account_number = "11111111111111"
      automation_account_name   = "Deployment-Acct"
      automation_github_url     = "https://github.com/tree/terraform/"
      automation_github_env     = "11111111111111-us-east-1"
      automation_pipeline       = "11111111111111-us-east-1"
      automation_region         = "us-east-1"
      department                = "Cloud DevOPS"
    }
  }
}

Limiting ingest of logs to Splunk with Fluent Bit and Docker

Requirement: We wanted to send logs to Splunk (a LOT of logs) and to do it with a Docker container running Fluent Bit.

*For a more detailed discussion of Fluent Bit vs. Fluentd, see https://logz.io/blog/fluentd-vs-fluent-bit/. For this scenario, Fluent Bit was the better choice.


Deliverable: Each VM has 5-20 Docker containers on it, and one of them is the Fluent Bit container. It sends the logs from all the required containers, not exceeding 500 GB a day, a quota shared across the containers. Example: if there are 10 containers, each can send 50 GB a day; if there are 15 containers, each can send 33.3 GB a day.

If a container is stopped, we need to know at what point the stop happened (this is handled through the Fluent Bit configuration).


Architecture of System:

The Dockerfile builds a container that monitors the logs on a VM running between 1 and 20 containers (there is no hard limit; it could be 100 containers or more).

Since a Docker container terminates when its main process receives a SIGHUP (or any of signals 1-15), we run supervisord as PID 1 and have it supervise the fluent-bit process, so fluent-bit can be sent a HUP to reload its configuration without killing the container.


Supervisord:

        https://gdevillele.github.io/engine/admin/using_supervisord/

       http://supervisord.org/
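
A minimal supervisord.conf sketch (paths are assumptions, not the original file; it assumes fluent-bit is installed under /opt/fluent-bit and the config lives in /fluent-bit):

; sketch only -- paths are assumptions
[supervisord]
nodaemon=true                 ; stay in the foreground so supervisord remains PID 1

[program:fluent-bit]
command=/opt/fluent-bit/bin/fluent-bit -c /fluent-bit/fluent-bit.conf
autorestart=true              ; a HUP/TERM to fluent-bit restarts it instead of killing the container
stopsignal=TERM

[program:cron]
command=/usr/sbin/cron -f     ; fires monitor-log.sh and the nightly uncomment script
autorestart=true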


This is the structure of the container build:

•       Dockerfile – the file from which the container is built

•       config.sh – main configuration file for the monitor-log.sh script

•       fluent-bit.conf – initial fluent-bit configuration

•       inputs.conf – the file which defines all the containers

•       metadata.lua – filtering file

•       monitor-log.sh – main script, which runs every 10 minutes.

•       oneopsscript.sh – script file that strips the logs to what is needed.

•       supervisord.conf – the supervisord daemon config

•       uncomment-fluent-config.sh – script that runs at 00:01 each day and resets the count for the next day (both scripts are scheduled via cron; see the sketch below).
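
The two scripts are driven by cron inside the container; a hypothetical /etc/cron.d entry (paths and log files are assumptions):

# hypothetical /etc/cron.d/fbit -- paths are assumptions
*/10 * * * * root cd /fluent-bit && ./monitor-log.sh >> /var/log/monitor-log.log 2>&1
1 0 * * *    root cd /fluent-bit && ./uncomment-fluent-config.sh >> /var/log/uncomment.log 2>&1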


Running the container:


To run the monitoring container you need to do the following:

1.         # docker build -t fbit .

What the pieces of this command do:

The docker build command builds a Docker image from a Dockerfile and a "context". A build's context is the set of files located in the specified PATH (here the current directory, ".").

-t: name and optionally a tag, in the 'name:tag' format.


2.       # docker run -v /data/weiotadm/docker/lib/containers/:/data/we/docker/lib/containers/ -v /data/:/home/prom/  --name=splunk_fbit122 fbit


This runs the container you built in step 1, bind-mounting the host directories into it.

-v or --volume: consists of three fields, separated by colon characters (:). The fields must be in the correct order, and the meaning of each field is not immediately obvious.

•       In the case of bind mounts, the first field is the path to the file or directory on the host machine.

•       The second field is the path where the file or directory is mounted in the container.

•       The third field is optional, and is a comma-separated list of options, such as ro, z, and Z.


Note: If you use -v or --volume to bind-mount a file or directory that does not yet exist on the Docker host, -v creates the endpoint for you. It is always created as a directory.

If you use --mount to bind-mount a file or directory that does not yet exist on the Docker host, Docker does not automatically create it for you, but generates an error.
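
For reference, the bind mounts from step 2 expressed with --mount instead of -v (same paths, so the directories must already exist):

docker run \
  --mount type=bind,source=/data/weiotadm/docker/lib/containers/,target=/data/we/docker/lib/containers/ \
  --mount type=bind,source=/data/,target=/home/prom/ \
  --name=splunk_fbit122 fbit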


The Dockerfile explained:
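
The Dockerfile itself isn't reproduced here; a sketch of one that matches the file list above (the base image, package names and paths are assumptions):

# sketch only -- base image and paths are assumptions
FROM debian:bullseye-slim

# supervisord (PID 1), cron for the scheduled scripts, psmisc for killall,
# and fluent-bit from the official install script
RUN apt-get update && \
    apt-get install -y --no-install-recommends supervisor cron curl ca-certificates procps psmisc && \
    curl -fsSL https://raw.githubusercontent.com/fluent/fluent-bit/master/install.sh | sh && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /fluent-bit
COPY fluent-bit.conf inputs.conf metadata.lua config.sh \
     monitor-log.sh oneopsscript.sh uncomment-fluent-config.sh ./
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
RUN chmod +x ./*.sh

# supervisord stays in the foreground as PID 1 and supervises fluent-bit + cron
CMD ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisord.conf"]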



config.sh explained:
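
config.sh isn't reproduced either; a sketch inferred from the variables monitor-log.sh references (every value below is an assumption):

#!/bin/bash
# sketch of config.sh, inferred from monitor-log.sh -- values are assumptions
log_dir="/data/we/docker/lib/containers"       # container log dirs (the -v bind mount target)
fluent_config_file="/fluent-bit/inputs.conf"   # file monitor-log.sh appends [INPUT] sections to
default_max_file_size=500                      # default per-container daily cap, in MB
log_type_pattern="stdout|stderr"               # egrep pattern for the log lines that count
# optional per-container overrides (MB), looked up by name via ${!container_name}:
# somecontainer=1000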


This is the meat and potatoes of the system, the monitor-log.sh script:


#!/bin/bash
# Runs every 10 minutes (from cron) to monitor per-container log volume.
## cat config.v2.json | grep -o '"Name":"[^"]*' | grep -o '[^"]*$'

# load config
source ./config.sh

echo "Monitoring Log Size"
count=0
for entry in "$log_dir"/*; do
  if [ -d "$entry" ]; then
    count=$((count+1))
    container_folder_name=$(basename "$entry")
    main_log_file_name=$container_folder_name-json.log
    main_log_file_path=$entry/$main_log_file_name
    if [ ! -f "$main_log_file_path" ]; then
      continue
    fi

    # is there already an [INPUT] (commented out or not) for this container?
    check_config="$(grep -wn "\(^\s.*\|^\|^\#.*\|^\s.*\#.*\)Path.*$container_folder_name" "$fluent_config_file" | cut -d: -f1)"
    echo "$check_config"

    if [ -z "$check_config" ]; then
      ## add an INPUT section if one does not exist yet
      echo "
[INPUT]
  name   tail
  Path $entry/*.log
  Parser json
  Skip_Empty_Lines true
  Tag_Regex (.*\/(?<container_id>.*)-json\.log)
  Tag docker.<container_id>
  Docker_Mode true
  Read_from_Head true
  Mem_Buf_Limit         5000MB
  Buffer_Chunk_Size     250k
  Buffer_Max_Size       500k
  Refresh_Interval      10" >> "$fluent_config_file"
    fi

    ## if today's volume exceeds the cap --> comment out the fluent config
    # line numbers of every section header ([INPUT], [OUTPUT], ...) or @ directive
    tag_lines="$(grep -wn "^\s*.*\[\([A-Z]\)*\]\|^\s*\@[A-Z].*" "$fluent_config_file" | cut -d: -f1)"
    # line number of the (uncommented) Path line for this container
    log_config_line="$(grep -wn "\(^\s.*\|^\)Path.*$container_folder_name" "$fluent_config_file" | cut -d: -f1)"
    echo "tag_lines=" $tag_lines

    if [ ! -z "$log_config_line" ]; then
      echo "Log config line: " $log_config_line
      today=$(date +"%Y-%m-%d")

      ## get container name & its max size
      config_json_file=$entry/config.v2.json
      container_name=$(grep -o '"Name":"[^"]*' "$config_json_file" | grep -o '[^"/]*$')
      if [ "${!container_name}" ]; then
        # per-container override: a variable named after the container (MB) in config.sh
        max_file_size_byte=$((${!container_name}*1024*1024))
      else
        ## fall back to the default max size (MB)
        max_file_size_byte=$(($default_max_file_size*1024*1024))
      fi
      echo "max_file_size_byte=" $max_file_size_byte

      ## count today's bytes for the log lines we actually ship
      file_size=$(sed -n "/${today}/p" "$main_log_file_path" | egrep "${log_type_pattern}" | wc -c)
      echo "log size of container: $container_name=" $file_size "Byte, Max="$max_file_size_byte

      if [ "$file_size" -lt "$max_file_size_byte" ]; then
        continue
      fi

      # locate the start & end lines of this container's [INPUT] section
      start_line=0
      end_line=0
      for input_line in $tag_lines; do
        if [ "$log_config_line" -gt "$input_line" ]; then
          echo "[INPUT] start at line:" $input_line
          start_line=$input_line
          continue
        else
          # first header after our Path line: the previous line ends the section
          end_line=$((input_line-1))
          break
        fi
      done
      if [[ $start_line -gt 0 && $end_line == 0 ]]; then
        # our section is the last one in the file
        end_line=$(wc -l "$fluent_config_file" | cut -d' ' -f1)
      fi
      if [[ $start_line -gt 0 && $end_line -gt 0 ]]; then
        echo "Comment from: "$start_line "to" $end_line
        # comment the section out and HUP fluent-bit so it drops the input
        sed -i "${start_line},${end_line}s/^/#/" "$fluent_config_file"
        killall -HUP fluent-bit
      fi
    fi
  fi
done
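
The base fluent-bit.conf isn't shown above. Since monitor-log.sh appends its [INPUT] sections to the file named in config.sh, one plausible layout keeps [SERVICE] and the Splunk [OUTPUT] in fluent-bit.conf and @INCLUDEs the generated inputs. A sketch, with placeholder host and token:

[SERVICE]
    Flush        5
    Parsers_File parsers.conf

@INCLUDE inputs.conf

[OUTPUT]
    name         splunk
    match        docker.*
    host         splunk-hec.example.com
    port         8088
    splunk_token ${SPLUNK_HEC_TOKEN}
    tls          on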


This is the uncomment script:

#!/bin/bash
# This script should run at 00:01 every day.
# It uncomments the fluent-bit [INPUT] sections that monitor-log.sh disabled,
# resetting every container's quota for the new day.

# load config
source ./config.sh

# line numbers of every section header or @ directive
tag_lines="$(grep -wn "^\s*.*\[\([A-Z]\)*\]\|^\s*\@[A-Z].*" "$fluent_config_file" | cut -d: -f1)"
echo "tag_lines="$tag_lines
count=0
for entry in "$log_dir"/*; do
  if [ -d "$entry" ]; then
    count=$((count+1))
    container_folder_name=$(basename "$entry")
    main_log_file_name=$container_folder_name-json.log
    main_log_file_path=$entry/$main_log_file_name

    # line number of a commented-out Path line for this container
    log_config_line="$(grep -wn "\(^\#.*\|^\s.*\#.*\)Path.*$container_folder_name" "$fluent_config_file" | cut -d: -f1)"
    start_line=0
    end_line=0
    if [ -z "$log_config_line" ]; then
      continue
    fi
    echo "log_config_line=" $log_config_line

    # locate the start & end lines of the commented section
    for tag_line in $tag_lines; do
      if [ "$log_config_line" -gt "$tag_line" ]; then
        echo "[INPUT] start at line:" $tag_line
        start_line=$tag_line
        continue
      else
        end_line=$((tag_line-1))
        break
      fi
    done
    if [[ $start_line -gt 0 && $end_line == 0 ]]; then
      end_line=$(wc -l "$fluent_config_file" | cut -d' ' -f1)
    fi
    if [[ $start_line -gt 0 && $end_line -gt 0 ]]; then
      echo "uncomment from: "$start_line " to " $end_line
      # strip the leading '#' from each line, then HUP fluent-bit to reload
      sed -i "${start_line},${end_line}s/^#//" "$fluent_config_file"
      killall -HUP fluent-bit
    fi
  fi
done