From d460ed6f0b35fff80c4089b650e02ae47ea8a83a Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Wed, 27 Mar 2024 20:38:53 +0000 Subject: [PATCH] Deployed 7755fb6 to main with MkDocs 1.5.3 and mike 1.1.2 --- main/404.html | 2 +- main/addons/argo-events/index.html | 2 +- main/addons/argo-rollouts/index.html | 2 +- main/addons/argo-workflows/index.html | 2 +- main/addons/argocd/index.html | 2 +- main/addons/aws-cloudwatch-metrics/index.html | 6 +- main/addons/aws-efs-csi-driver/index.html | 4 +- main/addons/aws-for-fluentbit/index.html | 10 +-- main/addons/aws-fsx-csi-driver/index.html | 4 +- .../aws-gateway-api-controller/index.html | 2 +- .../aws-load-balancer-controller/index.html | 12 ++-- .../aws-node-termination-handler/index.html | 2 +- main/addons/aws-private-ca-issuer/index.html | 2 +- main/addons/bottlerocket/index.html | 2 +- main/addons/cert-manager/index.html | 2 +- main/addons/cluster-autoscaler/index.html | 2 +- .../index.html | 2 +- main/addons/external-dns/index.html | 6 +- main/addons/external-secrets/index.html | 2 +- main/addons/fargate-fluentbit/index.html | 3 +- main/addons/ingress-nginx/index.html | 2 +- main/addons/karpenter/index.html | 2 +- main/addons/kube-prometheus-stack/index.html | 2 +- main/addons/metrics-server/index.html | 2 +- main/addons/opa-gatekeeper/index.html | 2 +- .../index.html | 2 +- main/addons/velero/index.html | 4 +- .../addons/vertical-pod-autoscaler/index.html | 2 +- main/amazon-eks-addons/index.html | 2 +- main/architectures/index.html | 2 +- main/aws-partner-addons/index.html | 2 +- main/helm-release/index.html | 2 +- main/index.html | 2 +- main/search/search_index.json | 2 +- main/sitemap.xml | 64 +++++++++--------- main/sitemap.xml.gz | Bin 557 -> 557 bytes 36 files changed, 82 insertions(+), 83 deletions(-) diff --git a/main/404.html b/main/404.html index a5da74a0..9016167b 100644 --- a/main/404.html +++ b/main/404.html @@ -573,7 +573,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/argo-events/index.html b/main/addons/argo-events/index.html index d981ff1b..264cc3ed 100644 --- a/main/addons/argo-events/index.html +++ b/main/addons/argo-events/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/argo-rollouts/index.html b/main/addons/argo-rollouts/index.html index 879bff39..10b8e711 100644 --- a/main/addons/argo-rollouts/index.html +++ b/main/addons/argo-rollouts/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/argo-workflows/index.html b/main/addons/argo-workflows/index.html index 29cdd4ab..68b68de2 100644 --- a/main/addons/argo-workflows/index.html +++ b/main/addons/argo-workflows/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/argocd/index.html b/main/addons/argocd/index.html index 236d80e0..789f4fff 100644 --- a/main/addons/argocd/index.html +++ b/main/addons/argocd/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/aws-cloudwatch-metrics/index.html b/main/addons/aws-cloudwatch-metrics/index.html index f8f36cca..dc41ce96 100644 --- a/main/addons/aws-cloudwatch-metrics/index.html +++ b/main/addons/aws-cloudwatch-metrics/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1042,10 +1042,10 @@

AWS CloudWatch Metrics

-

Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

+

Use AWS CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.

Usage

-

aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.

+

AWS CloudWatch Metrics can be deployed by enabling the add-on via the following.

enable_aws_cloudwatch_metrics = true
 

You can also customize the Helm chart that deploys aws-cloudwatch-metrics via the following configuration:
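A sketch of that configuration (the chart version and the extra IAM policies are illustrative; clusterName is already set to the EKS cluster name by the module, so it does not need to be supplied in the values):

  enable_aws_cloudwatch_metrics = true

  aws_cloudwatch_metrics = {
    role_policies = ["IAM Policies"] # extra policies in addition to CloudWatchAgentServerPolicy
    name          = "aws-cloudwatch-metrics"
    repository    = "https://aws.github.io/eks-charts"
    chart_version = "0.0.9"
    namespace     = "amazon-cloudwatch"
    values        = [templatefile("${path.module}/values.yaml", {})]
  }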

diff --git a/main/addons/aws-efs-csi-driver/index.html b/main/addons/aws-efs-csi-driver/index.html index 3aa3f819..d8fb22b7 100644 --- a/main/addons/aws-efs-csi-driver/index.html +++ b/main/addons/aws-efs-csi-driver/index.html @@ -640,7 +640,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1058,7 +1058,7 @@

AWS EFS CSI Driver

This add-on deploys the AWS EFS CSI driver into an EKS cluster.

Usage

-

The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

+

The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

  enable_aws_efs_csi_driver = true
 

You can optionally customize the Helm chart that deploys the driver via the following configuration.
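A sketch of that configuration, following the pattern used by the other add-ons in this module (the chart version and repository shown are assumptions to verify against the chart you deploy):

  enable_aws_efs_csi_driver = true

  aws_efs_csi_driver = {
    name          = "aws-efs-csi-driver"
    chart_version = "2.5.6" # illustrative version
    repository    = "https://kubernetes-sigs.github.io/aws-efs-csi-driver/"
    namespace     = "kube-system"
    values        = [templatefile("${path.module}/values.yaml", {})]
  }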

diff --git a/main/addons/aws-for-fluentbit/index.html b/main/addons/aws-for-fluentbit/index.html index 26437d35..67e93488 100644 --- a/main/addons/aws-for-fluentbit/index.html +++ b/main/addons/aws-for-fluentbit/index.html @@ -640,7 +640,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1056,7 +1056,7 @@

AWS for Fluent Bit

-

AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

+

AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

Usage

AWS for Fluent Bit can be deployed by enabling the add-on via the following.

enable_aws_for_fluentbit = true
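The chart and the CloudWatch log group it writes to can optionally be tuned through the aws_for_fluentbit and aws_for_fluentbit_cw_log_group inputs; a minimal sketch (attribute names should be verified against the module's documented inputs):

  enable_aws_for_fluentbit = true

  aws_for_fluentbit_cw_log_group = {
    retention_in_days = 7 # assumed attribute name
  }

  aws_for_fluentbit = {
    values = [templatefile("${path.module}/values.yaml", {})]
  }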
@@ -1111,14 +1111,14 @@ 

Verify the Fluent Bit setup

aws-for-fluent-bit-sbn9b   1/1     Running   0          15m
aws-for-fluent-bit-svhwq   1/1     Running   0          15m

-

Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/

+

Open the CloudWatch console

In the navigation pane, choose Log groups.

Make sure that you're in the Region where you deployed Fluent Bit.

Check the list of log groups in the Region. You should see the following:

-
/aws/eks/complete/aws-fluentbit-logs
+
/aws/eks/complete/aws-fluentbit-logs
 

If you enabled Container Insights, you should also see the following Log Groups in your CloudWatch Console.

-
/aws/containerinsights/Cluster_Name/application
+
/aws/containerinsights/Cluster_Name/application
 
 /aws/containerinsights/Cluster_Name/host
 
diff --git a/main/addons/aws-fsx-csi-driver/index.html b/main/addons/aws-fsx-csi-driver/index.html
index ee45cece..ba0c6c7a 100644
--- a/main/addons/aws-fsx-csi-driver/index.html
+++ b/main/addons/aws-fsx-csi-driver/index.html
@@ -653,7 +653,7 @@
         
   
   
-    AWS Load Balancer Controller.
+    AWS Load Balancer Controller
   
   
 
@@ -1084,7 +1084,7 @@
 

AWS FSx CSI Driver

This add-on deploys the Amazon FSx CSI Driver into an Amazon EKS cluster.

Usage

-

The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

+

The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

  enable_aws_fsx_csi_driver = true
 

Helm Chart customization
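A customization sketch in the same shape as the other add-ons in this module (the chart version and repository are assumptions to verify against the chart you deploy):

  enable_aws_fsx_csi_driver = true

  aws_fsx_csi_driver = {
    name          = "aws-fsx-csi-driver"
    chart_version = "1.8.0" # illustrative version
    repository    = "https://kubernetes-sigs.github.io/aws-fsx-csi-driver/"
    namespace     = "kube-system"
    values        = [templatefile("${path.module}/values.yaml", {})]
  }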

diff --git a/main/addons/aws-gateway-api-controller/index.html b/main/addons/aws-gateway-api-controller/index.html index 1ca5d228..c16c9710 100644 --- a/main/addons/aws-gateway-api-controller/index.html +++ b/main/addons/aws-gateway-api-controller/index.html @@ -633,7 +633,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/aws-load-balancer-controller/index.html b/main/addons/aws-load-balancer-controller/index.html index 16003c4f..40bc919d 100644 --- a/main/addons/aws-load-balancer-controller/index.html +++ b/main/addons/aws-load-balancer-controller/index.html @@ -24,7 +24,7 @@ - AWS Load Balancer Controller. - Amazon EKS Blueprints Addons + AWS Load Balancer Controller - Amazon EKS Blueprints Addons @@ -155,7 +155,7 @@
- AWS Load Balancer Controller. + AWS Load Balancer Controller
@@ -595,7 +595,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -606,7 +606,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1095,8 +1095,8 @@ -

AWS Load Balancer Controller.

-

AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

+

AWS Load Balancer Controller

+

AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

Usage

In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.
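A minimal sketch of those parameters (the optional aws_load_balancer_controller map and the vpcId chart value are illustrative):

  enable_aws_load_balancer_controller = true

  aws_load_balancer_controller = {
    set = [
      {
        name  = "vpcId"
        value = "<YOUR_VPC_ID>"
      }
    ]
  }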

diff --git a/main/addons/aws-node-termination-handler/index.html b/main/addons/aws-node-termination-handler/index.html index 2a17e4cc..a34a32cb 100644 --- a/main/addons/aws-node-termination-handler/index.html +++ b/main/addons/aws-node-termination-handler/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/aws-private-ca-issuer/index.html b/main/addons/aws-private-ca-issuer/index.html index f148c653..cff87114 100644 --- a/main/addons/aws-private-ca-issuer/index.html +++ b/main/addons/aws-private-ca-issuer/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/bottlerocket/index.html b/main/addons/bottlerocket/index.html index fa524d08..086381d6 100644 --- a/main/addons/bottlerocket/index.html +++ b/main/addons/bottlerocket/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/cert-manager/index.html b/main/addons/cert-manager/index.html index ed606583..6e661c8c 100644 --- a/main/addons/cert-manager/index.html +++ b/main/addons/cert-manager/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/cluster-autoscaler/index.html b/main/addons/cluster-autoscaler/index.html index 07b4f76a..088702f2 100644 --- a/main/addons/cluster-autoscaler/index.html +++ b/main/addons/cluster-autoscaler/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/cluster-proportional-autoscaler/index.html b/main/addons/cluster-proportional-autoscaler/index.html index f4467d52..fbdc0037 100644 --- a/main/addons/cluster-proportional-autoscaler/index.html +++ b/main/addons/cluster-proportional-autoscaler/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/external-dns/index.html b/main/addons/external-dns/index.html index ae6f7de0..c15ceb73 100644 --- a/main/addons/external-dns/index.html +++ b/main/addons/external-dns/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1067,10 +1067,8 @@

Usage

To further configure external-dns, refer to the examples:
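A minimal enablement sketch using the module's documented inputs (the hosted zone ARN is a placeholder):

  enable_external_dns = true

  external_dns_route53_zone_arns = [
    "arn:aws:route53:::hostedzone/XXXXXXXXXXXXX"
  ]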

diff --git a/main/addons/external-secrets/index.html b/main/addons/external-secrets/index.html index 9498c9b3..4d463c60 100644 --- a/main/addons/external-secrets/index.html +++ b/main/addons/external-secrets/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/fargate-fluentbit/index.html b/main/addons/fargate-fluentbit/index.html index f7c41b60..1be3a4d7 100644 --- a/main/addons/fargate-fluentbit/index.html +++ b/main/addons/fargate-fluentbit/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1089,6 +1089,7 @@

Usage

   retention_in_days = 7
   kms_key_id        = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
   skip_destroy      = true
+  }

Validation

    diff --git a/main/addons/ingress-nginx/index.html b/main/addons/ingress-nginx/index.html index 31bf9b5e..2caecfe5 100644 --- a/main/addons/ingress-nginx/index.html +++ b/main/addons/ingress-nginx/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/karpenter/index.html b/main/addons/karpenter/index.html index ec8f8aa7..c52e19f1 100644 --- a/main/addons/karpenter/index.html +++ b/main/addons/karpenter/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/kube-prometheus-stack/index.html b/main/addons/kube-prometheus-stack/index.html index 4d6da0f9..1e09209c 100644 --- a/main/addons/kube-prometheus-stack/index.html +++ b/main/addons/kube-prometheus-stack/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/metrics-server/index.html b/main/addons/metrics-server/index.html index a30fa002..ab748c06 100644 --- a/main/addons/metrics-server/index.html +++ b/main/addons/metrics-server/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/opa-gatekeeper/index.html b/main/addons/opa-gatekeeper/index.html index f8f5a314..d2c2fa60 100644 --- a/main/addons/opa-gatekeeper/index.html +++ b/main/addons/opa-gatekeeper/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/secrets-store-csi-driver-provider-aws/index.html b/main/addons/secrets-store-csi-driver-provider-aws/index.html index 75cd1764..84fb280c 100644 --- a/main/addons/secrets-store-csi-driver-provider-aws/index.html +++ b/main/addons/secrets-store-csi-driver-provider-aws/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller diff --git a/main/addons/velero/index.html b/main/addons/velero/index.html index c6a3aace..e945ccc3 100644 --- a/main/addons/velero/index.html +++ b/main/addons/velero/index.html @@ -586,7 +586,7 @@ - AWS Load Balancer Controller. + AWS Load Balancer Controller @@ -1062,7 +1062,7 @@

Velero

Plugin for AWS

    Usage

    -

    Velero can be deployed by enabling the add-on via the following.

    +

    Velero can be deployed by enabling the add-on via the following.

    enable_velero           = true
     velero_backup_s3_bucket = "<YOUR_BUCKET_NAME>"
     velero = {
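     The velero map itself typically points the backup location at that bucket; a sketch (the s3_backup_location attribute name and the /backups prefix are assumptions to verify against the module's inputs):

     velero = {
       s3_backup_location = "arn:aws:s3:::<YOUR_BUCKET_NAME>/backups" # assumed attribute
     }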
    diff --git a/main/addons/vertical-pod-autoscaler/index.html b/main/addons/vertical-pod-autoscaler/index.html
    index b51499e8..a5bb5c4a 100644
    --- a/main/addons/vertical-pod-autoscaler/index.html
    +++ b/main/addons/vertical-pod-autoscaler/index.html
    @@ -584,7 +584,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/amazon-eks-addons/index.html b/main/amazon-eks-addons/index.html
    index 827f2574..02c79b9e 100644
    --- a/main/amazon-eks-addons/index.html
    +++ b/main/amazon-eks-addons/index.html
    @@ -658,7 +658,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/architectures/index.html b/main/architectures/index.html
    index e3f9e461..06582bd4 100644
    --- a/main/architectures/index.html
    +++ b/main/architectures/index.html
    @@ -634,7 +634,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/aws-partner-addons/index.html b/main/aws-partner-addons/index.html
    index d6dfc7fb..625c0697 100644
    --- a/main/aws-partner-addons/index.html
    +++ b/main/aws-partner-addons/index.html
    @@ -594,7 +594,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/helm-release/index.html b/main/helm-release/index.html
    index eeff75cf..83574dea 100644
    --- a/main/helm-release/index.html
    +++ b/main/helm-release/index.html
    @@ -645,7 +645,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/index.html b/main/index.html
    index c3c91ea8..a66f7b9a 100644
    --- a/main/index.html
    +++ b/main/index.html
    @@ -671,7 +671,7 @@
             
       
       
    -    AWS Load Balancer Controller.
    +    AWS Load Balancer Controller
       
       
     
    diff --git a/main/search/search_index.json b/main/search/search_index.json
    index 71678cef..b5980e48 100644
    --- a/main/search/search_index.json
    +++ b/main/search/search_index.json
    @@ -1 +1 @@
    -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":""},{"location":"#amazon-eks-blueprints-addons","title":"Amazon EKS Blueprints Addons","text":"

    Terraform module to deploy Kubernetes addons on Amazon EKS clusters.

    "},{"location":"#usage","title":"Usage","text":"
    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.0\" #ensure to update this to the latest/desired version\n\n  cluster_name      = module.eks.cluster_name\n  cluster_endpoint  = module.eks.cluster_endpoint\n  cluster_version   = module.eks.cluster_version\n  oidc_provider_arn = module.eks.oidc_provider_arn\n\n  eks_addons = {\n    aws-ebs-csi-driver = {\n      most_recent = true\n    }\n    coredns = {\n      most_recent = true\n    }\n    vpc-cni = {\n      most_recent = true\n    }\n    kube-proxy = {\n      most_recent = true\n    }\n  }\n\n  enable_aws_load_balancer_controller    = true\n  enable_cluster_proportional_autoscaler = true\n  enable_karpenter                       = true\n  enable_kube_prometheus_stack           = true\n  enable_metrics_server                  = true\n  enable_external_dns                    = true\n  enable_cert_manager                    = true\n  cert_manager_route53_hosted_zone_arns  = [\"arn:aws:route53:::hostedzone/XXXXXXXXXXXXX\"]\n\n  tags = {\n    Environment = \"dev\"\n  }\n}\n\nmodule \"eks\" {\n  source = \"terraform-aws-modules/eks/aws\"\n\n  cluster_name    = \"my-cluster\"\n  cluster_version = \"1.29\"\n\n  ... truncated for brevity\n}\n
    "},{"location":"#requirements","title":"Requirements","text":"Name Version terraform >= 1.0 aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#providers","title":"Providers","text":"Name Version aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#modules","title":"Modules","text":"Name Source Version argo_events aws-ia/eks-blueprints-addon/aws 1.1.1 argo_rollouts aws-ia/eks-blueprints-addon/aws 1.1.1 argo_workflows aws-ia/eks-blueprints-addon/aws 1.1.1 argocd aws-ia/eks-blueprints-addon/aws 1.1.1 aws_cloudwatch_metrics aws-ia/eks-blueprints-addon/aws 1.1.1 aws_efs_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 aws_for_fluentbit aws-ia/eks-blueprints-addon/aws 1.1.1 aws_fsx_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 aws_gateway_api_controller aws-ia/eks-blueprints-addon/aws 1.1.1 aws_load_balancer_controller aws-ia/eks-blueprints-addon/aws 1.1.1 aws_node_termination_handler aws-ia/eks-blueprints-addon/aws 1.1.1 aws_node_termination_handler_sqs terraform-aws-modules/sqs/aws 4.0.1 aws_privateca_issuer aws-ia/eks-blueprints-addon/aws 1.1.1 bottlerocket_shadow aws-ia/eks-blueprints-addon/aws ~> 1.1.1 bottlerocket_update_operator aws-ia/eks-blueprints-addon/aws ~> 1.1.1 cert_manager aws-ia/eks-blueprints-addon/aws 1.1.1 cluster_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.1 cluster_proportional_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.1 external_dns aws-ia/eks-blueprints-addon/aws 1.1.1 external_secrets aws-ia/eks-blueprints-addon/aws 1.1.1 gatekeeper aws-ia/eks-blueprints-addon/aws 1.1.1 ingress_nginx aws-ia/eks-blueprints-addon/aws 1.1.1 karpenter aws-ia/eks-blueprints-addon/aws 1.1.1 karpenter_sqs terraform-aws-modules/sqs/aws 4.0.1 kube_prometheus_stack aws-ia/eks-blueprints-addon/aws 1.1.1 metrics_server aws-ia/eks-blueprints-addon/aws 1.1.1 secrets_store_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 secrets_store_csi_driver_provider_aws aws-ia/eks-blueprints-addon/aws 1.1.1 velero aws-ia/eks-blueprints-addon/aws 1.1.1 vpa aws-ia/eks-blueprints-addon/aws 1.1.1"},{"location":"#resources","title":"Resources","text":"Name Type aws_autoscaling_group_tag.aws_node_termination_handler resource aws_autoscaling_lifecycle_hook.aws_node_termination_handler resource aws_cloudwatch_event_rule.aws_node_termination_handler resource aws_cloudwatch_event_rule.karpenter resource aws_cloudwatch_event_target.aws_node_termination_handler resource aws_cloudwatch_event_target.karpenter resource aws_cloudwatch_log_group.aws_for_fluentbit resource aws_cloudwatch_log_group.fargate_fluentbit resource aws_eks_addon.this resource aws_iam_instance_profile.karpenter resource aws_iam_policy.fargate_fluentbit resource aws_iam_role.karpenter resource aws_iam_role_policy_attachment.additional resource aws_iam_role_policy_attachment.karpenter resource helm_release.this resource kubernetes_config_map_v1.aws_logging resource kubernetes_config_map_v1_data.aws_for_fluentbit_containerinsights resource kubernetes_namespace_v1.aws_observability resource time_sleep.this resource aws_caller_identity.current data source aws_eks_addon_version.this data source aws_iam_policy_document.aws_efs_csi_driver data source aws_iam_policy_document.aws_for_fluentbit data source aws_iam_policy_document.aws_fsx_csi_driver data source aws_iam_policy_document.aws_gateway_api_controller data source aws_iam_policy_document.aws_load_balancer_controller data source aws_iam_policy_document.aws_node_termination_handler data source aws_iam_policy_document.aws_privateca_issuer data source 
aws_iam_policy_document.cert_manager data source aws_iam_policy_document.cluster_autoscaler data source aws_iam_policy_document.external_dns data source aws_iam_policy_document.external_secrets data source aws_iam_policy_document.fargate_fluentbit data source aws_iam_policy_document.karpenter data source aws_iam_policy_document.karpenter_assume_role data source aws_iam_policy_document.velero data source aws_partition.current data source aws_region.current data source"},{"location":"#inputs","title":"Inputs","text":"Name Description Type Default Required argo_events Argo Events add-on configuration values any {} no argo_rollouts Argo Rollouts add-on configuration values any {} no argo_workflows Argo Workflows add-on configuration values any {} no argocd ArgoCD add-on configuration values any {} no aws_cloudwatch_metrics Cloudwatch Metrics add-on configuration values any {} no aws_efs_csi_driver EFS CSI Driver add-on configuration values any {} no aws_for_fluentbit AWS Fluentbit add-on configurations any {} no aws_for_fluentbit_cw_log_group AWS Fluentbit CloudWatch Log Group configurations any {} no aws_fsx_csi_driver FSX CSI Driver add-on configuration values any {} no aws_gateway_api_controller AWS Gateway API Controller add-on configuration values any {} no aws_load_balancer_controller AWS Load Balancer Controller add-on configuration values any {} no aws_node_termination_handler AWS Node Termination Handler add-on configuration values any {} no aws_node_termination_handler_asg_arns List of Auto Scaling group ARNs that AWS Node Termination Handler will monitor for EC2 events list(string) [] no aws_node_termination_handler_sqs AWS Node Termination Handler SQS queue configuration values any {} no aws_privateca_issuer AWS PCA Issuer add-on configurations any {} no bottlerocket_shadow Bottlerocket Update Operator CRDs configuration values any {} no bottlerocket_update_operator Bottlerocket Update Operator add-on configuration values any {} no cert_manager cert-manager add-on configuration values any {} no cert_manager_route53_hosted_zone_arns List of Route53 Hosted Zone ARNs that are used by cert-manager to create DNS records list(string)
    [  \"arn:aws:route53:::hostedzone/*\"]
    no cluster_autoscaler Cluster Autoscaler add-on configuration values any {} no cluster_endpoint Endpoint for your Kubernetes API server string n/a yes cluster_name Name of the EKS cluster string n/a yes cluster_proportional_autoscaler Cluster Proportional Autoscaler add-on configurations any {} no cluster_version Kubernetes <major>.<minor> version to use for the EKS cluster (i.e.: 1.24) string n/a yes create_delay_dependencies Dependency attribute which must be resolved before starting the create_delay_duration list(string) [] no create_delay_duration The duration to wait before creating resources string \"30s\" no create_kubernetes_resources Create Kubernetes resource with Helm or Kubernetes provider bool true no eks_addons Map of EKS add-on configurations to enable for the cluster. Add-on name can be the map keys or set with name any {} no eks_addons_timeouts Create, update, and delete timeout configurations for the EKS add-ons map(string) {} no enable_argo_events Enable Argo Events add-on bool false no enable_argo_rollouts Enable Argo Rollouts add-on bool false no enable_argo_workflows Enable Argo workflows add-on bool false no enable_argocd Enable Argo CD Kubernetes add-on bool false no enable_aws_cloudwatch_metrics Enable AWS Cloudwatch Metrics add-on for Container Insights bool false no enable_aws_efs_csi_driver Enable AWS EFS CSI Driver add-on bool false no enable_aws_for_fluentbit Enable AWS for FluentBit add-on bool false no enable_aws_fsx_csi_driver Enable AWS FSX CSI Driver add-on bool false no enable_aws_gateway_api_controller Enable AWS Gateway API Controller add-on bool false no enable_aws_load_balancer_controller Enable AWS Load Balancer Controller add-on bool false no enable_aws_node_termination_handler Enable AWS Node Termination Handler add-on bool false no enable_aws_privateca_issuer Enable AWS PCA Issuer bool false no enable_bottlerocket_update_operator Enable Bottlerocket Update Operator add-on bool false no enable_cert_manager Enable cert-manager add-on bool false no enable_cluster_autoscaler Enable Cluster autoscaler add-on bool false no enable_cluster_proportional_autoscaler Enable Cluster Proportional Autoscaler bool false no enable_eks_fargate Identifies whether or not respective addons should be modified to support deployment on EKS Fargate bool false no enable_external_dns Enable external-dns operator add-on bool false no enable_external_secrets Enable External Secrets operator add-on bool false no enable_fargate_fluentbit Enable Fargate FluentBit add-on bool false no enable_gatekeeper Enable Gatekeeper add-on bool false no enable_ingress_nginx Enable Ingress Nginx bool false no enable_karpenter Enable Karpenter controller add-on bool false no enable_kube_prometheus_stack Enable Kube Prometheus Stack bool false no enable_metrics_server Enable metrics server add-on bool false no enable_secrets_store_csi_driver Enable CSI Secrets Store Provider bool false no enable_secrets_store_csi_driver_provider_aws Enable AWS CSI Secrets Store Provider bool false no enable_velero Enable Kubernetes Dashboard add-on bool false no enable_vpa Enable Vertical Pod Autoscaler add-on bool false no external_dns external-dns add-on configuration values any {} no external_dns_route53_zone_arns List of Route53 zones ARNs which external-dns will have access to create/manage records (if using Route53) list(string) [] no external_secrets External Secrets add-on configuration values any {} no external_secrets_kms_key_arns List of KMS Key ARNs that are used by Secrets Manager that contain 
secrets to mount using External Secrets list(string)
    [  \"arn:aws:kms:::key/*\"]
    no external_secrets_secrets_manager_arns List of Secrets Manager ARNs that contain secrets to mount using External Secrets list(string)
    [  \"arn:aws:secretsmanager:::secret:*\"]
    no external_secrets_ssm_parameter_arns List of Systems Manager Parameter ARNs that contain secrets to mount using External Secrets list(string)
    [  \"arn:aws:ssm:::parameter/*\"]
    no fargate_fluentbit Fargate fluentbit add-on config any {} no fargate_fluentbit_cw_log_group AWS Fargate Fluentbit CloudWatch Log Group configurations any {} no gatekeeper Gatekeeper add-on configuration any {} no helm_releases A map of Helm releases to create. This provides the ability to pass in an arbitrary map of Helm chart definitions to create any {} no ingress_nginx Ingress Nginx add-on configurations any {} no karpenter Karpenter add-on configuration values any {} no karpenter_enable_instance_profile_creation Determines whether Karpenter will be allowed to create the IAM instance profile (v1beta1) or if Terraform will (v1alpha1) bool true no karpenter_enable_spot_termination Determines whether to enable native node termination handling bool true no karpenter_node Karpenter IAM role and IAM instance profile configuration values any {} no karpenter_sqs Karpenter SQS queue for native node termination handling configuration values any {} no kube_prometheus_stack Kube Prometheus Stack add-on configurations any {} no metrics_server Metrics Server add-on configurations any {} no oidc_provider_arn The ARN of the cluster OIDC Provider string n/a yes secrets_store_csi_driver CSI Secrets Store Provider add-on configurations any {} no secrets_store_csi_driver_provider_aws CSI Secrets Store Provider add-on configurations any {} no tags A map of tags to add to all resources map(string) {} no velero Velero add-on configuration values any {} no vpa Vertical Pod Autoscaler add-on configuration values any {} no"},{"location":"#outputs","title":"Outputs","text":"Name Description argo_events Map of attributes of the Helm release created argo_rollouts Map of attributes of the Helm release created argo_workflows Map of attributes of the Helm release created argocd Map of attributes of the Helm release created aws_cloudwatch_metrics Map of attributes of the Helm release and IRSA created aws_efs_csi_driver Map of attributes of the Helm release and IRSA created aws_for_fluentbit Map of attributes of the Helm release and IRSA created aws_fsx_csi_driver Map of attributes of the Helm release and IRSA created aws_gateway_api_controller Map of attributes of the Helm release and IRSA created aws_load_balancer_controller Map of attributes of the Helm release and IRSA created aws_node_termination_handler Map of attributes of the Helm release and IRSA created aws_privateca_issuer Map of attributes of the Helm release and IRSA created bottlerocket_update_operator Map of attributes of the Helm release and IRSA created cert_manager Map of attributes of the Helm release and IRSA created cluster_autoscaler Map of attributes of the Helm release and IRSA created cluster_proportional_autoscaler Map of attributes of the Helm release and IRSA created eks_addons Map of attributes for each EKS addons enabled external_dns Map of attributes of the Helm release and IRSA created external_secrets Map of attributes of the Helm release and IRSA created fargate_fluentbit Map of attributes of the configmap and IAM policy created gatekeeper Map of attributes of the Helm release and IRSA created gitops_metadata GitOps Bridge metadata helm_releases Map of attributes of the Helm release created ingress_nginx Map of attributes of the Helm release and IRSA created karpenter Map of attributes of the Helm release and IRSA created kube_prometheus_stack Map of attributes of the Helm release and IRSA created metrics_server Map of attributes of the Helm release and IRSA created secrets_store_csi_driver Map of attributes of the Helm release 
and IRSA created secrets_store_csi_driver_provider_aws Map of attributes of the Helm release and IRSA created velero Map of attributes of the Helm release and IRSA created vpa Map of attributes of the Helm release and IRSA created"},{"location":"amazon-eks-addons/","title":"Amazon EKS Add-ons","text":"

    The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API, whether native EKS add-ons or third-party add-ons supplied via the AWS Marketplace.

    See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as Additional Amazon EKS add-ons from independent software vendors.

    "},{"location":"amazon-eks-addons/#architecture-support","title":"Architecture Support","text":"

    The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.

    Add-on x86_64/amd64 arm64 vpc-cni \u2705 \u2705 aws-ebs-csi-driver \u2705 \u2705 coredns \u2705 \u2705 kube-proxy \u2705 \u2705 adot \u2705 \u2705 aws-guardduty-agent \u2705 \u2705"},{"location":"amazon-eks-addons/#usage","title":"Usage","text":"

    The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons argument which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:

    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n    <key> = {\n      name = string # Optional - <key> is used if `name` is not set\n\n      most_recent          = bool\n      addon_version        = string # overrides `most_recent` if set\n      configuration_values = string # JSON string\n\n      preserve                    = bool # defaults to `true`\n      resolve_conflicts_on_create = string # defaults to `OVERWRITE`\n      resolve_conflicts_on_update = string # defaults to `OVERWRITE`\n\n      timeouts = {\n        create = string # optional\n        update = string # optional\n        delete = string # optional\n      }\n\n      tags = map(string)\n    }\n  }\n}\n
    "},{"location":"amazon-eks-addons/#example","title":"Example","text":"
    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n    # Amazon EKS add-ons\n    aws-ebs-csi-driver = {\n      most_recent              = true\n      service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn\n    }\n\n    coredns = {\n      most_recent = true\n\n      timeouts = {\n        create = \"25m\"\n        delete = \"10m\"\n      }\n    }\n\n    vpc-cni = {\n      most_recent              = true\n      service_account_role_arn = module.vpc_cni_irsa.iam_role_arn\n    }\n\n    kube-proxy = {}\n\n    # Third party add-ons via AWS Marketplace\n    kubecost_kubecost = {\n      most_recent = true\n    }\n\n    teleport_teleport = {\n      most_recent = true\n    }\n  }\n}\n
    "},{"location":"amazon-eks-addons/#configuration-values","title":"Configuration Values","text":"

    You can supply custom configuration values to each add-on via the configuration_values argument of the add-on definition. The value provided must be a JSON-encoded string and adhere to the JSON schema provided by the version of the add-on. You can view this schema with the AWS CLI by supplying the add-on name and version to the describe-addon-configuration command:

    aws eks describe-addon-configuration \\\n--addon-name coredns \\\n--addon-version v1.10.1-eksbuild.2 \\\n--query 'configurationSchema' \\\n--output text | jq\n

    Which returns the formatted JSON schema like below:

    {\n  \"$ref\": \"#/definitions/Coredns\",\n  \"$schema\": \"http://json-schema.org/draft-06/schema#\",\n  \"definitions\": {\n    \"Coredns\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"affinity\": {\n          \"default\": {\n            \"affinity\": {\n              \"nodeAffinity\": {\n                \"requiredDuringSchedulingIgnoredDuringExecution\": {\n                  \"nodeSelectorTerms\": [\n                    {\n                      \"matchExpressions\": [\n                        {\n                          \"key\": \"kubernetes.io/os\",\n                          \"operator\": \"In\",\n                          \"values\": [\n                            \"linux\"\n                          ]\n                        },\n                        {\n                          \"key\": \"kubernetes.io/arch\",\n                          \"operator\": \"In\",\n                          \"values\": [\n                            \"amd64\",\n                            \"arm64\"\n                          ]\n                        }\n                      ]\n                    }\n                  ]\n                }\n              },\n              \"podAntiAffinity\": {\n                \"preferredDuringSchedulingIgnoredDuringExecution\": [\n                  {\n                    \"podAffinityTerm\": {\n                      \"labelSelector\": {\n                        \"matchExpressions\": [\n                          {\n                            \"key\": \"k8s-app\",\n                            \"operator\": \"In\",\n                            \"values\": [\n                              \"kube-dns\"\n                            ]\n                          }\n                        ]\n                      },\n                      \"topologyKey\": \"kubernetes.io/hostname\"\n                    },\n                    \"weight\": 100\n                  }\n                ]\n              }\n            }\n          },\n          \"description\": \"Affinity of the coredns pods\",\n          \"type\": [\n            \"object\",\n            \"null\"\n          ]\n        },\n        \"computeType\": {\n          \"type\": \"string\"\n        },\n        \"corefile\": {\n          \"description\": \"Entire corefile contents to use with installation\",\n          \"type\": \"string\"\n        },\n        \"nodeSelector\": {\n          \"additionalProperties\": {\n            \"type\": \"string\"\n          },\n          \"type\": \"object\"\n        },\n        \"replicaCount\": {\n          \"type\": \"integer\"\n        },\n        \"resources\": {\n          \"$ref\": \"#/definitions/Resources\"\n        },\n        \"tolerations\": {\n          \"default\": [\n            {\n              \"key\": \"CriticalAddonsOnly\",\n              \"operator\": \"Exists\"\n            },\n            {\n              \"key\": \"node-role.kubernetes.io/master\",\n              \"operator\": \"NoSchedule\"\n            }\n          ],\n          \"description\": \"Tolerations of the coredns pod\",\n          \"items\": {\n            \"type\": \"object\"\n          },\n          \"type\": \"array\"\n        },\n        \"topologySpreadConstraints\": {\n          \"description\": \"The coredns pod topology spread constraints\",\n          \"type\": \"array\"\n        }\n      },\n      \"title\": \"Coredns\",\n      \"type\": \"object\"\n    },\n    \"Limits\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        
\"cpu\": {\n          \"type\": \"string\"\n        },\n        \"memory\": {\n          \"type\": \"string\"\n        }\n      },\n      \"title\": \"Limits\",\n      \"type\": \"object\"\n    },\n    \"Resources\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"limits\": {\n          \"$ref\": \"#/definitions/Limits\"\n        },\n        \"requests\": {\n          \"$ref\": \"#/definitions/Limits\"\n        }\n      },\n      \"title\": \"Resources\",\n      \"type\": \"object\"\n    }\n  }\n}\n

    You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode() function as shown below:

    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n     coredns = {\n      most_recent = true\n      configuration_values = jsonencode({\n        replicaCount = 4\n        tolerations = [\n        {\n          key      = \"dedicated\",\n          operator = \"Equal\",\n          effect   = \"NoSchedule\",\n          value    = \"orchestration-seb\"\n        }\n        ]\n\n        topologySpreadConstraints = [\n          {\n            maxSkew = 1\n            topologyKey = \"topology.kubernetes.io/zone\"\n            whenUnsatisfiable = \"ScheduleAnyway\"\n            labelSelector = {\n              matchLabels = {\n                k8s-app: \"kube-dns\"\n              }\n            }\n          }\n        ]\n\n        affinity = {\n          nodeAffinity = {\n            requiredDuringSchedulingIgnoredDuringExecution = {\n              nodeSelectorTerms = [\n              {\n                matchExpressions = [\n                  {\n                    key = \"kubernetes.io/os\"\n                    operator = \"In\"\n                    values = [\"linux\"]\n                  },\n                  {\n                    key = \"kubernetes.io/arch\"\n                    operator = \"In\"\n                    values = [\"amd64\"]\n                  }\n                ]\n              }]\n            }\n          }\n\n          podAffinity = {\n            requiredDuringSchedulingIgnoredDuringExecution = [{\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"NotIn\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n            }\n            ]\n          }\n\n          podAntiAffinity = {\n            preferredDuringSchedulingIgnoredDuringExecution = [{\n              podAffinityTerm = {\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"In\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n              }\n              weight = 100\n              }\n            ]\n\n            requiredDuringSchedulingIgnoredDuringExecution = [{\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"In\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n              }\n            ]\n          }\n\n        }\n\n        resources = {\n          limits = {\n            cpu    = \"100m\"\n            memory = \"150Mi\"\n          }\n          requests = {\n            cpu    = \"100m\"\n            memory = \"150Mi\"\n        }\n      })\n    }\n
    "},{"location":"architectures/","title":"Architectures","text":""},{"location":"architectures/#addons","title":"Addons","text":"Addon x86_64/amd64 arm64 Argo Rollouts \u2705 \u2705 Argo Workflows \u2705 \u2705 Argo CD \u2705 \u2705 AWS CloudWatch Metrics \u2705 \u2705 AWS EFS CSI Driver \u2705 \u2705 AWS for FluentBit \u2705 \u2705 AWS FSx CSI Driver \u2705 \u2705 AWS Load Balancer Controller \u2705 \u2705 AWS Node Termination Handler \u2705 \u2705 AWS Private CA Issuer \u2705 \u2705 Cert Manager \u2705 \u2705 Cluster Autoscaler \u2705 \u2705 Cluster Proportional Autoscaler \u2705 \u2705 External DNS \u2705 \u2705 External Secrets \u2705 \u2705 OPA Gatekeeper \u2705 \u2705 Ingress Nginx \u2705 \u2705 Karpenter \u2705 \u2705 Kube-Prometheus Stack \u2705 \u2705 Metrics Server \u2705 \u2705 Secrets Store CSI Driver \u2705 \u2705 Secrets Store CSI Driver Provider AWS \u2705 \u2705 Velero \u2705 \u2705 Vertical Pod Autoscaler \u2705 \u2705"},{"location":"architectures/#amazon-eks-addons","title":"Amazon EKS Addons","text":"

    The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These addons are specified via the eks_addons input variable.

    Addon x86_64/amd64 arm64 AWS VPC CNI \u2705 \u2705 AWS EBS CSI Driver \u2705 \u2705 CoreDNS \u2705 \u2705 Kube-proxy \u2705 \u2705 ADOT Collector \u2705 \u2705 AWS GuardDuty Agent \u2705 \u2705"},{"location":"aws-partner-addons/","title":"AWS Partner Addons","text":"

    The following addons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information on the addon, its supported configuration values, as well as questions, comments, and feature requests.

    Addon Description Ondat Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. Hashicorp - Consul Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. Hashicorp - Vault Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. Sysdig Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. Tetrate Istio Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms. NetApp ONTAP Astra Trident NetApp's Astra Trident provides dynamic storage orchestration for FSx for NetApp ONTAP using a Container Storage Interface (CSI) compliant driver. Kong Konnect - Kong Gateway Kong Gateway is the fastest and most adopted API gateway that integrates with Kong Konnect, the end-to-end SaaS API lifecycle management platform. Kong Konnect - Kong Ingress Controller Kong Ingress Controller combines the powerful features of the widely popular Kong Gateway with Kubernetes in a truly Kubernetes-native manner and now integrated with Kong Konnect, the end-to-end SaaS API lifecycle management platform. Kong Konnect - Kong Mesh Manager Kong Mesh is the most flexible, enterprise-proven, service-to-service connectivity solution for developing modern applications that drive compelling digital experiences for an organization\u2019s customers. CloudBees CI CloudBees CI is a highly scalable, resilient, and flexible continuous integration (CI) solution based on the popular Jenkins build orchestration tool. It provides a centralized, shared, and self-service experience tailored for all development teams utilizing Jenkins."},{"location":"helm-release/","title":"Helm Release Add-ons","text":"

    Starting with EKS Blueprints v5, we have made a decision to only support the provisioning of a certain core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically you can expect that any AWS-created add-on that is not yet available via the Amazon EKS add-ons will be prioritized to be provisioned through this repository.

    In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g. metrics-server) and certain patterns (gitops) that are foundational elements for a complete blueprint of an EKS cluster.

    One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they often want to provision add-ons that are not natively supported by EKS Blueprints. For such add-ons, the options are as follows:

    "},{"location":"helm-release/#with-helm_release-terraform-resource","title":"With helm_release Terraform Resource","text":"

    The helm_release resource is the most fundamental way to provision a Helm chart via Terraform.

    Use this resource if you need to control the lifecycle of your add-ons down to the level of each individual add-on resource.
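    For illustration, a single chart provisioned directly with the resource might look like the following sketch (chart, version, and values are illustrative):

    resource "helm_release" "metrics_server" {
      name             = "metrics-server"
      repository       = "https://kubernetes-sigs.github.io/metrics-server/"
      chart            = "metrics-server"
      version          = "3.11.0" # illustrative version
      namespace        = "kube-system"
      create_namespace = false

      values = [
        <<-EOT
          replicas: 2
        EOT
      ]
    }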

    "},{"location":"helm-release/#with-helm_releases-variable","title":"With helm_releases Variable","text":"

    You can use the helm_releases variable in EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we just iterate through the provided map and pass each configuration to the Terraform helm_release resource.

    E.g.

    module \"addons\" {\n  source  = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.0\"\n\n  cluster_name      = \"<cluster_name>\"\n  cluster_endpoint  = \"<cluster_endpoint>\"\n  cluster_version   = \"<cluster_version>\"\n  oidc_provider_arn = \"<oidc_provider_arn>\"\n\n  # EKS add-ons\n  eks_addons = {\n    coredns = {}\n    vpc-cni = {}\n    kube-proxy = {}\n  }\n\n  # Blueprints add-ons\n  enable_aws_efs_csi_driver                    = true\n  enable_aws_cloudwatch_metrics                = true\n  enable_cert_manager                          = true\n  ...\n\n  # Pass in any number of Helm charts to be created for those that are not natively supported\n  helm_releases = {\n    prometheus-adapter = {\n      description      = \"A Helm chart for k8s prometheus adapter\"\n      namespace        = \"prometheus-adapter\"\n      create_namespace = true\n      chart            = \"prometheus-adapter\"\n      chart_version    = \"4.2.0\"\n      repository       = \"https://prometheus-community.github.io/helm-charts\"\n      values = [\n        <<-EOT\n          replicas: 2\n          podDisruptionBudget:\n            enabled: true\n        EOT\n      ]\n    }\n    gpu-operator = {\n      description      = \"A Helm chart for NVIDIA GPU operator\"\n      namespace        = \"gpu-operator\"\n      create_namespace = true\n      chart            = \"gpu-operator\"\n      chart_version    = \"v23.3.2\"\n      repository       = \"https://nvidia.github.io/gpu-operator\"\n      values = [\n        <<-EOT\n          operator:\n            defaultRuntime: containerd\n        EOT\n      ]\n    }\n  }\n\n  tags = local.tags\n}\n

    With this pattern, the lifecycle of all your add-ons is tied to that of the addons module. This allows you to easily target the addons module in your Terraform apply and destroy commands. E.g.

    terraform apply -target=module.addons\n\nterraform destroy -target=module.addons\n
    "},{"location":"helm-release/#with-eks-blueprints-addon-module","title":"With EKS Blueprints Addon Module","text":"

    If you have an add-on that requires an IAM Role for Service Accounts (IRSA), we have created a new Terraform module terraform-aws-eks-blueprints-addon that can help provision a Helm chart along with an IAM role and policies with the permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.

    You can optionally use this module for add-ons that do not need IRSA, or even just to create the IAM resources for IRSA and skip the Helm release. Detailed usage of how to consume this module can be found in its readme.
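    A sketch of consuming that module directly for a chart that needs IRSA (input names are assumptions; check the module's readme for the authoritative interface):

    module "custom_addon" {
      source  = "aws-ia/eks-blueprints-addon/aws"
      version = "~> 1.1"

      chart         = "<chart_name>"
      chart_version = "<chart_version>"
      repository    = "<helm_repository_url>"
      namespace     = "<namespace>"

      # IRSA role created alongside the Helm release
      create_role = true
      role_name   = "<role_name>"

      oidc_providers = {
        this = {
          provider_arn    = "<oidc_provider_arn>"
          service_account = "<service_account_name>"
        }
      }
    }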

    This pattern can be used to create a Terraform module with a set of add-ons that are not supported in EKS Blueprints Add-ons today and wrap them in the same module definition. An example of this is the ACK add-ons repository, which is a collection of ACK Helm chart deployments with IRSA for each of the ACK controllers.

    "},{"location":"addons/argo-events/","title":"Argo Events","text":"

    Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).

    "},{"location":"addons/argo-events/#usage","title":"Usage","text":"

    Argo Events can be deployed by enabling the add-on via the following.

    enable_argo_events = true\n

    You can optionally customize the Helm chart that deploys Argo Events via the following configuration.

      enable_argo_events = true\n\n  argo_events = {\n    name          = \"argo-events\"\n    chart_version = \"2.4.0\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-events\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-events pods are running.

    $ kubectl get pods -n argo-events\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-events-controller-manager-bfb894cdb-k8hzn        1/1     Running   0          11m\n
    "},{"location":"addons/argo-rollouts/","title":"Argo Rollouts","text":"

    Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.

    "},{"location":"addons/argo-rollouts/#usage","title":"Usage","text":"

    Argo Rollouts can be deployed by enabling the add-on via the following.

    enable_argo_rollouts = true\n

    You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.

      enable_argo_rollouts = true\n\n  argo_rollouts = {\n    name          = \"argo-rollouts\"\n    chart_version = \"2.22.3\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-rollouts\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-rollouts pods are running.

    $ kubectl get pods -n argo-rollouts\nNAME                             READY   STATUS    RESTARTS   AGE\nargo-rollouts-5db5688849-x89zb   0/1     Running   0          11s\n
    "},{"location":"addons/argo-workflows/","title":"Argo Workflows","text":"

    Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).

    "},{"location":"addons/argo-workflows/#usage","title":"Usage","text":"

    Argo Workflows can be deployed by enabling the add-on via the following.

    enable_argo_workflows = true\n

    You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.

      enable_argo_workflows = true\n\n  argo_workflows = {\n    name          = \"argo-workflows\"\n    chart_version = \"0.28.2\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-workflows\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-workflows pods are running.

    $ kubectl get pods -n argo-workflows\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-workflows-server-68988cd864-22zhr                1/1     Running   0          6m32s\nargo-workflows-workflow-controller-7ff7b5658d-9q44f   1/1     Running   0          6m32s\n
    "},{"location":"addons/argocd/","title":"Argo CD","text":"

    Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.

    "},{"location":"addons/argocd/#usage","title":"Usage","text":"

    Argo CD can be deployed by enabling the add-on via the following.

    enable_argocd = true\n

    You can optionally customize the Helm chart that deploys Argo CD via the following configuration.

      enable_argocd = true\n\n  argocd = {\n    name          = \"argocd\"\n    chart_version = \"5.29.1\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argocd\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argocd pods are running.

    $ kubectl get pods -n argocd\nNAME                                                        READY   STATUS    RESTARTS   AGE\nargo-cd-argocd-application-controller-0                     1/1     Running   0          146m\nargo-cd-argocd-applicationset-controller-678d85f77b-rmpcb   1/1     Running   0          146m\nargo-cd-argocd-dex-server-7b6c9b5969-zpqnl                  1/1     Running   0          146m\nargo-cd-argocd-notifications-controller-6d489b99c9-j6fdw    1/1     Running   0          146m\nargo-cd-argocd-redis-59dd95f5b5-8fx74                       1/1     Running   0          146m\nargo-cd-argocd-repo-server-7b9bd88c95-mh2fz                 1/1     Running   0          146m\nargo-cd-argocd-server-6f9cfdd4d5-8mfpc                      1/1     Running   0          146m\n
    "},{"location":"addons/aws-cloudwatch-metrics/","title":"AWS CloudWatch Metrics","text":"

    Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

    Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.

    "},{"location":"addons/aws-cloudwatch-metrics/#usage","title":"Usage","text":"

    aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.

    enable_aws_cloudwatch_metrics = true\n

    You can also customize the Helm chart that deploys aws-cloudwatch-metrics via the following configuration:

      enable_aws_cloudwatch_metrics        = true\n\n  aws_cloudwatch_metrics_irsa_policies = [\"IAM Policies\"]\n  aws_cloudwatch_metrics   = {\n    role_policies = [\"IAM Policies\"]  # extra policies in addition to CloudWatchAgentServerPolicy\n    name          = \"aws-cloudwatch-metrics\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    chart_version = \"0.0.9\"\n    namespace     = \"amazon-cloudwatch\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here\n  }\n

    Verify aws-cloudwatch-metrics pods are running.

    $ kubectl get pods -n amazon-cloudwatch\n\nNAME                           READY   STATUS    RESTARTS   AGE\naws-cloudwatch-metrics-2dt5h   1/1     Running   0          149m\n
    "},{"location":"addons/aws-efs-csi-driver/","title":"AWS EFS CSI Driver","text":"

    This add-on deploys the AWS EFS CSI driver into an EKS cluster.

    "},{"location":"addons/aws-efs-csi-driver/#usage","title":"Usage","text":"

    The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

      enable_aws_efs_csi_driver = true\n

    You can optionally customize the Helm chart that deploys the driver via the following configuration.

      enable_aws_efs_csi_driver = true\n\n  # Optional aws_efs_csi_driver_helm_config\n  aws_efs_csi_driver = {\n    repository     = \"https://kubernetes-sigs.github.io/aws-efs-csi-driver/\"\n    chart_version  = \"2.4.1\"\n    role_policies  = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n  }\n

    Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

    $ kubectl get deployment efs-csi-controller -n kube-system\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nefs-csi-controller   2/2     2            2           4m29s\n
    $ kubectl get daemonset efs-csi-node -n kube-system\n\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE\nefs-csi-node   3         3         3       3            3           beta.kubernetes.io/os=linux   4m32s\n
    "},{"location":"addons/aws-efs-csi-driver/#validate-efs-csi-driver","title":"Validate EFS CSI Driver","text":"

    Follow the static provisioning example described here to validate the CSI driver is working as expected.
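
    As a quick reference, the sketch below shows what static provisioning can look like when expressed with the Terraform kubernetes provider instead of raw manifests. It is only an illustration: the file system ID placeholder, the efs-sc StorageClass name, and the resource names are assumptions for this sketch, not values created by this add-on.

    resource \"kubernetes_persistent_volume_v1\" \"efs_static\" {\n  metadata {\n    name = \"efs-static-pv\"\n  }\n  spec {\n    capacity = {\n      storage = \"5Gi\"\n    }\n    access_modes                     = [\"ReadWriteMany\"]\n    persistent_volume_reclaim_policy = \"Retain\"\n    storage_class_name               = \"efs-sc\"\n    persistent_volume_source {\n      csi {\n        driver        = \"efs.csi.aws.com\"\n        volume_handle = \"<YOUR_EFS_FILESYSTEM_ID>\" # hypothetical placeholder for an existing EFS file system ID\n      }\n    }\n  }\n}\n\nresource \"kubernetes_persistent_volume_claim_v1\" \"efs_static\" {\n  metadata {\n    name      = \"efs-static-claim\"\n    namespace = \"default\"\n  }\n  spec {\n    access_modes       = [\"ReadWriteMany\"]\n    storage_class_name = \"efs-sc\"\n    volume_name        = kubernetes_persistent_volume_v1.efs_static.metadata[0].name\n    resources {\n      requests = {\n        storage = \"5Gi\"\n      }\n    }\n  }\n}\n

    A Pod can then mount the efs-static-claim PVC by name; the capacity values are required by the Kubernetes API but are not enforced by EFS.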

    "},{"location":"addons/aws-for-fluentbit/","title":"AWS for Fluent Bit","text":"

    AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

    "},{"location":"addons/aws-for-fluentbit/#usage","title":"Usage","text":"

    AWS for Fluent Bit can be deployed by enabling the add-on via the following.

    enable_aws_for_fluentbit = true\n

    You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit_cw_log_group = {\n    create          = true\n    use_name_prefix = true # Set this to true to enable name prefix\n    name_prefix     = \"eks-cluster-logs-\"\n    retention       = 7\n  }\n  aws_for_fluentbit = {\n    name          = \"aws-for-fluent-bit\"\n    chart_version = \"0.1.28\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    If you want to enable Container Insights on Amazon EKS through Fluent Bit, you need to add the following parameter in your configuration:

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit = {\n    enable_containerinsights = true\n  }\n

    By default, Container Insights will not enable the kubelet monitoring feature with the AWS for Fluent Bit integration, since this is an optional feature that is suggested only for large clusters. To enable the Container Insights Use_Kubelet feature, you'll need to provide a few more parameters:

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit = {\n    enable_containerinsights = true\n    kubelet_monitoring       = true\n    set = [{\n        name  = \"cloudWatchLogs.autoCreateGroup\"\n        value = true\n      },\n      {\n        name  = \"hostNetwork\"\n        value = true\n      },\n      {\n        name  = \"dnsPolicy\"\n        value = \"ClusterFirstWithHostNet\"\n      }\n    ]\n  }\n
    "},{"location":"addons/aws-for-fluentbit/#verify-the-fluent-bit-setup","title":"Verify the Fluent Bit setup","text":"

    Verify aws-for-fluentbit pods are running.

    $ kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-for-fluent-bit\nNAME                       READY   STATUS    RESTARTS   AGE\naws-for-fluent-bit-6lhkj   1/1     Running   0          15m\naws-for-fluent-bit-sbn9b   1/1     Running   0          15m\naws-for-fluent-bit-svhwq   1/1     Running   0          15m\n

    Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/

    In the navigation pane, choose Log groups.

    Make sure that you're in the Region where you deployed Fluent Bit.

    Check the list of log groups in the Region. You should see the following:

    /aws/eks/complete/aws-fluentbit-logs\n

    If you enabled Container Insights, you should also see the following Log Groups in your CloudWatch Console.

    /aws/containerinsights/Cluster_Name/application\n\n/aws/containerinsights/Cluster_Name/host\n\n/aws/containerinsights/Cluster_Name/dataplane\n

    Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.

    There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.

    "},{"location":"addons/aws-fsx-csi-driver/","title":"AWS FSx CSI Driver","text":"

    This add-on deploys the Amazon FSx CSI Driver into an Amazon EKS Cluster.

    "},{"location":"addons/aws-fsx-csi-driver/#usage","title":"Usage","text":"

    The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

      enable_aws_fsx_csi_driver = true\n
    "},{"location":"addons/aws-fsx-csi-driver/#helm-chart-customization","title":"Helm Chart customization","text":"

    You can optionally customize the Helm chart deployment using a configuration like the following.

      enable_aws_fsx_csi_driver = true\n  aws_fsx_csi_driver = {\n    namespace     = \"aws-fsx-csi-driver\"\n    chart_version = \"1.6.0\"\n    role_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n  }\n

    You can find all available Helm Chart parameter values here

    "},{"location":"addons/aws-fsx-csi-driver/#validation","title":"Validation","text":"

    Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

    $ kubectl -n kube-system get deployment fsx-csi-controller\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nfsx-csi-controller   2/2     2            2           4m29s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-controller\nNAME                                  READY   STATUS    RESTARTS   AGE\nfsx-csi-controller-56c6d9bbb8-89cpc   4/4     Running   0          3m30s\nfsx-csi-controller-56c6d9bbb8-9wnlh   4/4     Running   0          3m30s\n
    $ kubectl -n kube-system get daemonset fsx-csi-node\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE\nfsx-csi-node   3         3         3       3            3           kubernetes.io/os=linux   5m27s\n\n$ kubectl -n kube-system get pods -l  app=fsx-csi-node\nNAME                 READY   STATUS    RESTARTS   AGE\nfsx-csi-node-7c5z6   3/3     Running   0          5m29s\nfsx-csi-node-d5q28   3/3     Running   0          5m29s\nfsx-csi-node-hlg8q   3/3     Running   0          5m29s\n

    Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.

    $ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: fsx-sc\nprovisioner: fsx.csi.aws.com\nparameters:\n  subnetId: <YOUR_SUBNET_IDs>\n  securityGroupIds: <YOUR_SG_ID>\n  perUnitStorageThroughput: \"200\"\n  deploymentType: PERSISTENT_1\nmountOptions:\n  - flock\nEOF\n
    $ kubectl describe storageclass fsx-sc\nName:            fsx-sc\nIsDefaultClass:  No\nAnnotations:     kubectl.kubernetes.io/last-applied-configuration={\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{},\"name\":\"fsx-sc\"},\"mountOptions\":null,\"parameters\":{\"deploymentType\":\"PERSISTENT_1\",\"perUnitStorageThroughput\":\"200\",\"securityGroupIds\":\"sg-q1w2e3r4t5y6u7i8o\",\"subnetId\":\"subnet-q1w2e3r4t5y6u7i8o\"},\"provisioner\":\"fsx.csi.aws.com\"}\n\nProvisioner:           fsx.csi.aws.com\nParameters:            deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o\nAllowVolumeExpansion:  <unset>\nMountOptions:          <none>\nReclaimPolicy:         Delete\nVolumeBindingMode:     Immediate\nEvents:                <none>\n

    Create a PVC.

    $ cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: fsx-claim\nspec:\n  accessModes:\n    - ReadWriteMany\n  storageClassName: fsx-sc\n  resources:\n    requests:\n      storage: 1200Gi\nEOF\n

    Wait for the PV to be created and bound to your PVC.

    $ kubectl get pvc\nNAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nfsx-claim   Bound    pvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            fsx-sc         7m47s\n$ kubectl get pv\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE\npvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            Delete           Bound    default/fsx-claim   fsx-sc                  2m13s\n
    "},{"location":"addons/aws-gateway-api-controller/","title":"AWS Gateway API Controller","text":"

    AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.

    "},{"location":"addons/aws-gateway-api-controller/#usage","title":"Usage","text":"

    AWS Gateway API Controller can be deployed by enabling the add-on via the following.

      enable_aws_gateway_api_controller = true\n  aws_gateway_api_controller = {\n    repository_username = data.aws_ecrpublic_authorization_token.token.user_name\n    repository_password = data.aws_ecrpublic_authorization_token.token.password\n    set = [{\n      name  = \"clusterVpcId\"\n      value = \"vpc-12345abcd\"\n    }]\n}\n

    You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.

      enable_aws_gateway_api_controller = true\n  aws_gateway_api_controller = {\n    name                = \"aws-gateway-api-controller\"\n    chart_version       = \"v0.0.12\"\n    repository          = \"oci://public.ecr.aws/aws-application-networking-k8s\"\n    repository_username = data.aws_ecrpublic_authorization_token.token.user_name\n    repository_password = data.aws_ecrpublic_authorization_token.token.password\n    namespace           = \"aws-application-networking-system\"\n    values              = [templatefile(\"${path.module}/values.yaml\", {})]\n    set = [{\n      name  = \"clusterVpcId\"\n      value = \"vpc-12345abcd\"\n    }]\n  }\n

    Verify aws-gateway-api-controller pods are running.

    $ kubectl get pods -n aws-application-networking-system\nNAME                                                               READY   STATUS    RESTARTS   AGE\naws-gateway-api-controller-aws-gateway-controller-chart-8f42q426   1/1     Running   0          40s\naws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g   1/1     Running   0          71s\n

    Deploy example GatewayClass

    $ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml\ngatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created\n

    Describe GatewayClass

    $ kubectl describe gatewayclass\nName:         amazon-vpc-lattice\nNamespace:\nLabels:       <none>\nAnnotations:  <none>\nAPI Version:  gateway.networking.k8s.io/v1beta1\nKind:         GatewayClass\nMetadata:\n  Creation Timestamp:  2023-06-22T22:33:32Z\n  Generation:          1\n  Resource Version:    819021\n  UID:                 aac59195-8f37-4c23-a2a5-b0f363deda77\nSpec:\n  Controller Name:  application-networking.k8s.aws/gateway-api-controller\nStatus:\n  Conditions:\n    Last Transition Time:  2023-06-22T22:33:32Z\n    Message:               Accepted\n    Observed Generation:   1\n    Reason:                Accepted\n    Status:                True\n    Type:                  Accepted\nEvents:                    <none>\n
    "},{"location":"addons/aws-load-balancer-controller/","title":"AWS Load Balancer Controller.","text":"

    AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

    "},{"location":"addons/aws-load-balancer-controller/#usage","title":"Usage","text":"

    In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

    NOTE: In versions 2.5 and newer, the AWS Load Balancer Controller becomes the default controller for Kubernetes service resources with the type: LoadBalancer and makes an AWS Network Load Balancer (NLB) for each service. It does this by making a mutating webhook for services, which sets the spec.loadBalancerClass field to service.k8s.aws/nlb for new services of type: LoadBalancer. You can turn off this feature and revert to using the legacy Cloud Provider as the default controller, by setting the helm chart value enableServiceMutatorWebhook to false. The cluster won't provision new Classic Load Balancers for your services unless you turn off this feature. Existing Classic Load Balancers will continue to work.

    module \"eks_blueprints_addons\" {\n\n  enable_aws_load_balancer_controller = true\n  aws_load_balancer_controller = {\n    set = [\n      {\n        name  = \"vpcId\"\n        value = module.vpc.vpc_id\n      },\n      {\n        name  = \"podDisruptionBudget.maxUnavailable\"\n        value = 1\n      },\n      {\n        name  = \"enableServiceMutatorWebhook\"\n        value = \"false\"\n      }\n    ]\n  }\n
    "},{"location":"addons/aws-load-balancer-controller/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller configuration block:

      aws_load_balancer_controller = {\n    set = [\n      {\n        name  = \"vpcId\"\n        value = module.vpc.vpc_id\n      },\n      {\n        name  = \"podDisruptionBudget.maxUnavailable\"\n        value = 1\n      },\n      {\n        name  = \"resources.requests.cpu\"\n        value = \"100m\"\n      },\n      {\n        name  = \"resources.requests.memory\"\n        value = \"128Mi\"\n      },\n    ]\n  }\n}\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/aws-load-balancer-controller/#validate","title":"Validate","text":"
    1. To validate the deployment, check that the aws-load-balancer-controller Pods were created in the kube-system Namespace, as in the following example.
    kubectl -n kube-system get pods | grep aws-load-balancer-controller\nNAMESPACE       NAME                                            READY   STATUS    RESTARTS   AGE\nkube-system     aws-load-balancer-controller-6cbdb58654-fvskt   1/1     Running   0          26m\nkube-system     aws-load-balancer-controller-6cbdb58654-sc7dk   1/1     Running   0          26m\n
    1. Create a Kubernetes Ingress, using the alb IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc.
    kubectl create ingress example-ingress --class alb --rule=\"/*=example-svc:80\" \\\n--annotation alb.ingress.kubernetes.io/scheme=internet-facing \\\n--annotation alb.ingress.kubernetes.io/target-type=ip\n
    kubectl get ingress\nNAME                CLASS   HOSTS   ADDRESS                                                                 PORTS   AGE\nexample-ingress     alb     *       k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com   80      4m9s\n
    "},{"location":"addons/aws-load-balancer-controller/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart AWS Docs

    "},{"location":"addons/aws-node-termination-handler/","title":"AWS Node Termination Handler","text":"

    This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.

    "},{"location":"addons/aws-node-termination-handler/#usage","title":"Usage","text":"

    AWS Node Termination Handler can be deployed by enabling the add-on via the following.

    enable_aws_node_termination_handler = true\n

    You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.

      enable_aws_node_termination_handler = true\n\n  aws_node_termination_handler = {\n    name          = \"aws-node-termination-handler\"\n    chart_version = \"0.21.0\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    namespace     = \"aws-node-termination-handler\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify aws-node-termination-handler pods are running.

    $ kubectl get pods -n aws-node-termination-handler\nNAME                                            READY   STATUS    RESTARTS      AGE\naws-node-termination-handler-6f598b6b89-6mqgk   1/1     Running   1 (22h ago)   26h\n

    Verify SQS Queue is created.

    $ aws sqs list-queues\n\n{\n    \"QueueUrls\": [\n        \"https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004\"\n    ]\n}\n

    Verify Event Rules are created.

    $ aws event list-rules\n{\n    [\n        {\n            \"Name\": \"NTH-ASGTerminiate-20230602191740664900000025\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"Auto scaling instance terminate event\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-HealthEvent-20230602191740079300000022\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"AWS health event\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-InstanceRebalance-20230602191740077100000021\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance rebalance recommendation\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-InstanceStateChange-20230602191740165000000024\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance state-change notification\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-SpotInterrupt-20230602191740077100000020\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 spot instance interruption warning\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHASGTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHInstanceStateChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHRebalanceRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance 
Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHScheduledChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHSpotTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        }\n    ]\n}\n
    "},{"location":"addons/aws-private-ca-issuer/","title":"AWS Private CA Issuer","text":"

    AWS Private CA is an AWS service that can set up and manage private CAs, as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager that signs off certificate requests using AWS Private CA in an Amazon EKS Cluster.

    "},{"location":"addons/aws-private-ca-issuer/#usage","title":"Usage","text":""},{"location":"addons/aws-private-ca-issuer/#pre-requisites","title":"Pre-requisites","text":"

    To deploy the AWS PCA Issuer, you need to install cert-manager first; refer to this documentation to do it through EKS Blueprints Addons.

    "},{"location":"addons/aws-private-ca-issuer/#deployment","title":"Deployment","text":"

    With cert-manager deployed, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons by referencing the following parameters under the module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_cert_manager         = true\n  enable_aws_privateca_issuer = true\n  aws_privateca_issuer = {\n    acmca_arn        = aws_acmpca_certificate_authority.this.arn\n  }\n}\n
    "},{"location":"addons/aws-private-ca-issuer/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the aws_privateca_issuer configuration block:

      aws_privateca_issuer = {\n    acmca_arn        = aws_acmpca_certificate_authority.this.arn\n    namespace        = \"aws-privateca-issuer\"\n    create_namespace = true\n  }\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/aws-private-ca-issuer/#validation","title":"Validation","text":"
    1. List all the pods running in the aws-privateca-issuer and cert-manager Namespaces.
    kubectl get pods -n aws-privateca-issuer\nkubectl get pods -n cert-manager\n
    1. Check the certificate status; it should be in Ready state and pointing to a secret created in the same Namespace.
    kubectl get certificate -o wide\nNAME      READY   SECRET                  ISSUER                    STATUS                                          AGE\nexample   True    example-clusterissuer   tls-with-aws-pca-issuer   Certificate is up to date and has not expired   41m\n\nkubectl get secret example-clusterissuer\nNAME                    TYPE                DATA   AGE\nexample-clusterissuer   kubernetes.io/tls   3      43m\n
    "},{"location":"addons/aws-private-ca-issuer/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart AWS Docs

    "},{"location":"addons/bottlerocket/","title":"Bottlerocket and Bottlerocket Update Operator","text":"

    Bottlerocket is a Linux-based open-source operating system that focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads.

    The Bottlerocket Update Operator (BRUPOP) is a Kubernetes operator that coordinates Bottlerocket updates on hosts in a cluster. It relies on a controller deployment on one node to orchestrate updates across the cluster, an agent daemon set on every Bottlerocket node that is responsible for periodically querying and performing updates (rolled out in waves to reduce the impact of issues), and an API Server that performs additional authorization.

    Cert-manager is required for the API server to use a CA certificate when communicating over SSL with the agents.

    • Helm charts
    "},{"location":"addons/bottlerocket/#requirements","title":"Requirements","text":"

    BRUPOP performs updates only on Nodes running Bottlerocket OS. Here are some code snippets showing how to set up Bottlerocket OS Nodes using Managed Node Groups with the Terraform Amazon EKS module and Karpenter Node Classes.

    Notice the label bottlerocket.aws/updater-interface-version=2.0.0 set in the [settings.kubernetes.node-labels] section. This label is required for the BRUPOP Agent to query and perform updates. Nodes not labeled will not be checked by the agent.

    "},{"location":"addons/bottlerocket/#managed-node-groups","title":"Managed Node Groups","text":"
    module \"eks\" {\n  source  = \"terraform-aws-modules/eks/aws\"\n  version = \"~> 19.21\"\n...\n  eks_managed_node_groups = {\n    bottlerocket = {\n      platform = \"bottlerocket\"\n      ami_type       = \"BOTTLEROCKET_x86_64\"\n      instance_types = [\"m5.large\", \"m5a.large\"]\n\n      iam_role_attach_cni_policy = true\n\n      min_size     = 1\n      max_size     = 5\n      desired_size = 3\n\n      enable_bootstrap_user_data = true\n      bootstrap_extra_args = <<-EOT\n            [settings.host-containers.admin]\n            enabled = false\n            [settings.host-containers.control]\n            enabled = true\n            [settings.kernel]\n            lockdown = \"integrity\"\n            [settings.kubernetes.node-labels]\n            \"bottlerocket.aws/updater-interface-version\" = \"2.0.0\"\n            [settings.kubernetes.node-taints]\n            \"CriticalAddonsOnly\" = \"true:NoSchedule\"\n          EOT\n    }\n  }\n}\n
    "},{"location":"addons/bottlerocket/#karpenter","title":"Karpenter","text":"
    apiVersion: karpenter.k8s.aws/v1beta1\nkind: EC2NodeClass\nmetadata:\n  name: bottlerocket-example\nspec:\n...\n  amiFamily: Bottlerocket\n  userData:  |\n    [settings.kubernetes]\n    \"kube-api-qps\" = 30\n    \"shutdown-grace-period\" = \"30s\"\n    \"shutdown-grace-period-for-critical-pods\" = \"30s\"\n    [settings.kubernetes.eviction-hard]\n    \"memory.available\" = \"20%\"\n    [settings.kubernetes.node-labels]\n     \"bottlerocket.aws/updater-interface-version\" = \"2.0.0\"\n
    "},{"location":"addons/bottlerocket/#usage","title":"Usage","text":"

    BRUPOP can be deployed with the default configuration by enabling the add-on via the following. Notice the parameter wait = true set for Cert-Manager; this is needed since BRUPOP requires that the Cert-Manager CRDs are already present in the cluster before it is deployed.

    module \"eks_blueprints_addons\" {\n  source  = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.13\"\n\n  cluster_name      = module.eks.cluster_name\n  cluster_endpoint  = module.eks.cluster_endpoint\n  cluster_version   = module.eks.cluster_version\n  oidc_provider_arn = module.eks.oidc_provider_arn\n\n  enable_cert_manager = true\n  cert_manager = {\n    wait = true\n  }\n  enable_bottlerocket_update_operator = true\n}\n

    You can also customize the Helm charts that deploy the bottlerocket_update_operator and the bottlerocket_shadow via the following configuration:

    enable_bottlerocket_update_operator           = true\n\nbottlerocket_update_operator = {\n  name          = \"brupop-operator\"\n  description   = \"A Helm chart for BRUPOP\"\n  chart_version = \"1.3.0\"\n  namespace     = \"brupop\"\n  set           = [{\n    name  = \"scheduler_cron_expression\"\n    value = \"0 * * * * * *\" # Default Unix Cron syntax, set to check every minute. Example \"0 0 23 * * Sat *\" Perform update checks every Saturday at 23H / 11PM\n    }]\n}\n\nbottlerocket_shadow = {\n  name          = \"brupop-crds\"\n  description   = \"A Helm chart for BRUPOP CRDs\"\n  chart_version = \"1.0.0\"\n}\n

    To see a complete working example, see the bottlerocket Blueprints Pattern.

    "},{"location":"addons/bottlerocket/#validate","title":"Validate","text":"
    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing the brupop resources provisioned:
    $ kubectl -n brupop-bottlerocket-aws get all\n\nNAME                                                READY   STATUS    RESTARTS      AGE\npod/brupop-agent-5nv6m                              1/1     Running   1 (33h ago)   33h\npod/brupop-agent-h4vw9                              1/1     Running   1 (33h ago)   33h\npod/brupop-agent-sr9ms                              1/1     Running   2 (33h ago)   33h\npod/brupop-apiserver-6ccb74f599-4c9lv               1/1     Running   0             33h\npod/brupop-apiserver-6ccb74f599-h6hg8               1/1     Running   0             33h\npod/brupop-apiserver-6ccb74f599-svw8n               1/1     Running   0             33h\npod/brupop-controller-deployment-58d46595cc-7vxnt   1/1     Running   0             33h\n\nNAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE\nservice/brupop-apiserver           ClusterIP   172.20.153.72   <none>        443/TCP   33h\nservice/brupop-controller-server   ClusterIP   172.20.7.127    <none>        80/TCP    33h\n\nNAME                          DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE\ndaemonset.apps/brupop-agent   3         3         3       3            3           <none>          33h\n\nNAME                                           READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/brupop-apiserver               3/3     3            3           33h\ndeployment.apps/brupop-controller-deployment   1/1     1            1           33h\n\nNAME                                                      DESIRED   CURRENT   READY   AGE\nreplicaset.apps/brupop-apiserver-6ccb74f599               3         3         3       33h\nreplicaset.apps/brupop-controller-deployment-58d46595cc   1         1         1       33h\n\n$ kubectl describe apiservices.apiregistration.k8s.io v2.brupop.bottlerocket.aws\nName:         v2.brupop.bottlerocket.aws\nNamespace:\nLabels:       kube-aggregator.kubernetes.io/automanaged=true\nAnnotations:  <none>\nAPI Version:  apiregistration.k8s.io/v1\nKind:         APIService\nMetadata:\n  Creation Timestamp:  2024-01-30T16:27:15Z\n  Resource Version:    8798\n  UID:                 034abe22-7e5f-4040-9b64-8ca9d55a4af6\nSpec:\n  Group:                   brupop.bottlerocket.aws\n  Group Priority Minimum:  1000\n  Version:                 v2\n  Version Priority:        100\nStatus:\n  Conditions:\n    Last Transition Time:  2024-01-30T16:27:15Z\n    Message:               Local APIServices are always available\n    Reason:                Local\n    Status:                True\n    Type:                  Available\nEvents:                    <none>\n
    1. If not set during the deployment, add the required label bottlerocket.aws/updater-interface-version=2.0.0 as shown below to all the Nodes that you want to have updates handled by BRUPOP.
    $ kubectl label node ip-10-0-34-87.us-west-2.compute.internal bottlerocket.aws/updater-interface-version=2.0.0\nnode/ip-10-0-34-87.us-west-2.compute.internal labeled\n\n$ kubectl get nodes -L bottlerocket.aws/updater-interface-version\nNAME                                        STATUS                     ROLES    AGE   VERSION               UPDATER-INTERFACE-VERSION\nip-10-0-34-87.us-west-2.compute.internal    Ready                      <none>   34h   v1.28.1-eks-d91a302   2.0.0\n
    1. Because the default cron schedule for BRUPOP is set to check for updates every minute, you'll be able to see in a few minutes that the Node had its version updated automatically with no downtime.
    kubectl get nodes\nNAME                                        STATUS                     ROLES    AGE   VERSION\nip-10-0-34-87.us-west-2.compute.internal    Ready                      <none>   34h   v1.28.4-eks-d91a302\n
    "},{"location":"addons/cert-manager/","title":"Cert-Manager","text":"

    Cert-manager is an X.509 certificate controller for Kubernetes workloads. It will obtain certificates from a variety of Issuers, both popular public Issuers as well as private Issuers, ensure the certificates are valid and up-to-date, and attempt to renew certificates at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.

    "},{"location":"addons/cert-manager/#usage","title":"Usage","text":"

    To deploy the cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_cert_manager         = true\n}\n
    "},{"location":"addons/cert-manager/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the cert_manager configuration block:

      cert_manager = {\n    chart_version    = \"v1.11.1\"\n    namespace        = \"cert-manager\"\n    create_namespace = true\n  }\n

    You can find all available Helm Chart parameter values here

    "},{"location":"addons/cert-manager/#validation","title":"Validation","text":"
    1. Validate that the Cert-Manager Pods are running.
    kubectl -n cert-manager get pods\nNAME                                      READY   STATUS    RESTARTS   AGE\ncert-manager-5989bcc87-96qvf              1/1     Running   0          2m49s\ncert-manager-cainjector-9b44ddb68-8c7b9   1/1     Running   0          2m49s\ncert-manager-webhook-776b65456-k6br4      1/1     Running   0          2m49s\n
    1. Create a SelfSigned ClusterIssuer resource in the cluster.
    apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: selfsigned-cluster-issuer\nspec:\n  selfSigned: {}\n
    kubectl get clusterissuers -o wide selfsigned-cluster-issuer\nNAME                        READY   STATUS   AGE\nselfsigned-cluster-issuer   True             3m\n
    1. Create a Certificate in a given Namespace.
    apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: example\n  namespace: default\nspec:\n  isCA: true\n  commonName: example\n  secretName: example-secret\n  privateKey:\n    algorithm: ECDSA\n    size: 256\n  issuerRef:\n    name: selfsigned-cluster-issuer\n    kind: ClusterIssuer\n    group: cert-manager.io\n
    1. Check the certificate status; it should be in Ready state and pointing to a secret created in the same Namespace.
    kubectl get certificate -o wide\nNAME      READY   SECRET           ISSUER                      STATUS                                          AGE\nexample   True    example-secret   selfsigned-cluster-issuer   Certificate is up to date and has not expired   44s\n\nkubectl get secret example-secret\nNAME             TYPE                DATA   AGE\nexample-secret   kubernetes.io/tls   3      70s\n
    "},{"location":"addons/cert-manager/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart

    "},{"location":"addons/cluster-autoscaler/","title":"Cluster Autoscaler","text":"

    The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.

    "},{"location":"addons/cluster-autoscaler/#usage","title":"Usage","text":"

    Cluster Autoscaler can be deployed by enabling the add-on via the following.

    enable_cluster_autoscaler = true\n

    You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.

      enable_cluster_autoscaler = true\n\n  cluster_autoscaler = {\n    name          = \"cluster-autoscaler\"\n    chart_version = \"9.29.0\"\n    repository    = \"https://kubernetes.github.io/autoscaler\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify cluster-autoscaler pods are running.

    $ kubectl get pods -n kube-system\nNAME                                                         READY   STATUS    RESTARTS     AGE\ncluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9   1/1     Running   1 (2d ago)   2d5h\n
    "},{"location":"addons/cluster-proportional-autoscaler/","title":"Cluster Proportional Autoscaler","text":"

    Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.

    The cluster-proportional-autoscaler helps to scale the applications using deployment or replicationcontroller or replicaset. This is an alternative solution to Horizontal Pod Autoscaling. It is typically installed as a Deployment in your cluster.

    Refer to the eks-best-practices-guides for additional configuration guidance.

    "},{"location":"addons/cluster-proportional-autoscaler/#usage","title":"Usage","text":"

    This add-on requires both enable_cluster_proportional_autoscaler and cluster_proportional_autoscaler as mandatory fields.

    The example below shows how to enable cluster-proportional-autoscaler for the CoreDNS Deployment. The CoreDNS Deployment is not configured with HPA, so this add-on helps scale the CoreDNS Add-on according to the number of nodes and cores in the cluster.

    This Add-on can be used to scale any application with Deployment objects.

    enable_cluster_proportional_autoscaler  = true\ncluster_proportional_autoscaler  = {\n    values = [\n      <<-EOT\n        nameOverride: kube-dns-autoscaler\n\n        # Formula for controlling the replicas. Adjust according to your needs\n        # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )\n        config:\n          linear:\n            coresPerReplica: 256\n            nodesPerReplica: 16\n            min: 1\n            max: 100\n            preventSinglePointFailure: true\n            includeUnschedulableNodes: true\n\n        # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).\n        options:\n          target: deployment/coredns # Notice the target as `deployment/coredns`\n\n        serviceAccount:\n          create: true\n          name: kube-dns-autoscaler\n\n        podSecurityContext:\n          seccompProfile:\n            type: RuntimeDefault\n          supplementalGroups: [65534]\n          fsGroup: 65534\n\n        resources:\n          limits:\n            cpu: 100m\n            memory: 128Mi\n          requests:\n            cpu: 100m\n            memory: 128Mi\n\n        tolerations:\n          - key: \"CriticalAddonsOnly\"\n            operator: \"Exists\"\n            description: \"Cluster Proportional Autoscaler for CoreDNS Service\"\n      EOT\n    ]\n}\n
    "},{"location":"addons/cluster-proportional-autoscaler/#expected-result","title":"Expected result","text":"

    The cluster-proportional-autoscaler pod running in the kube-system namespace.

    kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler\nNAME                                                              READY   STATUS    RESTARTS   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7   1/1     Running   0          21h\n
    The cluster-proportional-autoscaler-kube-dns-autoscaler config map exists.
    kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler\nNAME                                                  DATA   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler   1      21h\n

    "},{"location":"addons/cluster-proportional-autoscaler/#testing","title":"Testing","text":"

    To test that coredns pods scale, first take a baseline of how many nodes the cluster has and how many coredns pods are running.

    kubectl get nodes\nNAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

    Change the following parameters in the hcl code above so a scaling event can be easily triggered:

            config:\n          linear:\n            coresPerReplica: 4\n            nodesPerReplica: 2\n            min: 1\n            max: 4\n
    and execute terraform apply.

    Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.

    Check that the new node came up and coredns scaled up.

    NAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-14-120.ec2.internal   Ready    <none>   10m   v1.26.4-eks-0a21954\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-ww64t   1/1     Running   0          10m\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

    "},{"location":"addons/external-dns/","title":"External DNS","text":"

    ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly\u2014e.g. AWS Route 53.

    "},{"location":"addons/external-dns/#usage","title":"Usage","text":"

    External DNS can be deployed by enabling the add-on via the following.

    enable_external_dns = true\n

    You can optionally customize the Helm chart that deploys External DNS via the following configuration.

      enable_external_dns = true\n\n  external_dns = {\n    name          = \"external-dns\"\n    chart_version = \"1.12.2\"\n    repository    = \"https://kubernetes-sigs.github.io/external-dns/\"\n    namespace     = \"external-dns\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n  external_dns_route53_zone_arns = [\"XXXXXXXXXXXXXXXXXXXXXXX\"]\n

    Verify external-dns pods are running.

    $ kubectl get pods -n external-dns\nNAME                            READY   STATUS    RESTARTS     AGE\nexternal-dns-849b89c675-ffnf6   1/1     Running   1 (2d ago)   2d5h\n

    To further configure external-dns, refer to the examples:

    • AWS Load Balancer Controller
    • Route53
      • Same domain for public and private Route53 zones
    • Cloud Map
    • Kube Ingress AWS Controller
    "},{"location":"addons/external-secrets/","title":"External Secrets","text":"

    External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.

    "},{"location":"addons/external-secrets/#usage","title":"Usage","text":"

    External Secrets can be deployed by enabling the add-on via the following.

    enable_external_secrets = true\n

    You can optionally customize the Helm chart that deploys External Secrets via the following configuration.

      enable_external_secrets = true\n\n  external_secrets = {\n    name          = \"external-secrets\"\n    chart_version = \"0.9.13\"\n    repository    = \"https://charts.external-secrets.io\"\n    namespace     = \"external-secrets\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify external-secrets pods are running.

    $ kubectl get pods -n external-secrets\nNAME                                               READY   STATUS    RESTARTS       AGE\nexternal-secrets-67bfd5b47c-xc5xf                  1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-cert-controller-8f75c6f79-qcfx4   1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-webhook-78f6bd456-76wmm           1/1     Running   1 (2d1h ago)   2d6h\n
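
    Once the operator is running, secrets are pulled in by defining store and secret resources. The sketch below, written with the Terraform kubernetes provider, shows a hypothetical ExternalSecret; the ClusterSecretStore name and the Secrets Manager key are assumptions you would replace with your own, and neither is created by this add-on.

    resource \"kubernetes_manifest\" \"example_external_secret\" {\n  manifest = {\n    apiVersion = \"external-secrets.io/v1beta1\"\n    kind       = \"ExternalSecret\"\n    metadata = {\n      name      = \"example\"\n      namespace = \"default\"\n    }\n    spec = {\n      refreshInterval = \"1h\"\n      secretStoreRef = {\n        name = \"example-cluster-secret-store\" # assumed to exist already\n        kind = \"ClusterSecretStore\"\n      }\n      target = {\n        name = \"example-k8s-secret\"\n      }\n      data = [\n        {\n          secretKey = \"password\"\n          remoteRef = {\n            key = \"example/secret\" # hypothetical AWS Secrets Manager entry\n          }\n        }\n      ]\n    }\n  }\n}\n

    The operator then keeps the example-k8s-secret Kubernetes Secret in sync with the external value at the configured refresh interval. Because kubernetes_manifest validates against the cluster at plan time, apply it only after the external-secrets CRDs are installed.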
    "},{"location":"addons/external-secrets/#eks-fargate","title":"EKS Fargate","text":"

    By default, external-secrets creates a webhook pod that listens on port 10250 [Reference]:

    yes, by default we use port 10250 for the webhook pod because it's generally allowed throughout most default firewall implementations (GKE, EKS), but it conflicts with Fargate. Any port number should do the trick, as long as there is no sg rules or NACLs blocking it :).

    This module adds a value enable_eks_fargate which will change the webhook port from 10250 to 9443, which matches the prior default value for external-secrets and is typically an acceptable port within most clusters' firewalls today.
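
    A minimal sketch of what that looks like, assuming enable_eks_fargate is passed as a top-level module input alongside the add-on flag:

      enable_external_secrets = true\n\n  # Assumed input described above: moves the external-secrets webhook off port 10250 for Fargate compatibility\n  enable_eks_fargate      = true\n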

    "},{"location":"addons/fargate-fluentbit/","title":"Fargate FluentBit","text":"

    Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar, but Amazon runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated ConfigMap that is deployed via this Add-on.

    "},{"location":"addons/fargate-fluentbit/#usage","title":"Usage","text":"

    To configure the Fargate Fluentbit ConfigMap via the EKS Blueprints Addons, just reference the following parameters under the module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_fargate_fluentbit = true\n  fargate_fluentbit = {\n    flb_log_cw = true\n  }\n}\n

    It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group configuration block:

      fargate_fluentbit_cw_log_group = {\n\n  name              = \"existing-log-group\"\n  name_prefix       = \"dev-environment-logs\"\n  retention_in_days = 7\n  kms_key_id        = \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\n  skip_destroy      = true\n  }\n
    "},{"location":"addons/fargate-fluentbit/#validation","title":"Validation","text":"
    1. Check if the aws-logging configMap for Fargate Fluentbit was created.
    kubectl -n aws-observability get configmap aws-logging -o yaml\napiVersion: v1\ndata:\n  filters.conf: |\n    [FILTER]\n      Name parser\n      Match *\n      Key_Name log\n      Parser regex\n      Preserve_Key True\n      Reserve_Data True\n  flb_log_cw: \"true\"\n  output.conf: |\n    [OUTPUT]\n      Name cloudwatch_logs\n      Match *\n      region us-west-2\n      log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\n      log_stream_prefix fargate-logs-\n      auto_create_group true\n  parsers.conf: |\n    [PARSER]\n      Name regex\n      Format regex\n      Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$\n      Time_Key time\n      Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n      Time_Keep On\n      Decode_Field_As json message\nimmutable: false\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-05-08T21:14:52Z\"\n  name: aws-logging\n  namespace: aws-observability\n  resourceVersion: \"1795\"\n  uid: d822bcf5-a441-4996-857e-7fb1357bc07e\n
    1. Validate if the CloudWatch LogGroup was created accordingly, and LogStreams were populated.
    aws logs describe-log-groups --log-group-name-prefix \"/fargate-serverless/fargate-fluentbit\"\n{\n    \"logGroups\": [\n        {\n            \"logGroupName\": \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\",\n            \"creationTime\": 1683580491652,\n            \"retentionInDays\": 90,\n            \"metricFilterCount\": 0,\n            \"arn\": \"arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*\",\n            \"storedBytes\": 0\n        }\n    ]\n}\n
    aws logs describe-log-streams --log-group-name \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'\n[\n    \"fargate-logs-flblogs.var.log.fluent-bit.log\",\n    \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log\",\n    \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log\",\n    \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log\",\n    \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log\"\n]\n
    "},{"location":"addons/fargate-fluentbit/#resources","title":"Resources","text":"

    AWS Docs Fluent Bit for Amazon EKS on AWS Fargate Blog Post

    "},{"location":"addons/ingress-nginx/","title":"Ingress Nginx","text":"

    This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.

    Other than handling Kubernetes ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL Path (path based routing).

    "},{"location":"addons/ingress-nginx/#usage","title":"Usage","text":"

    Ingress Nginx Controller can be deployed by enabling the add-on via the following.

    enable_ingress_nginx = true\n

    You can optionally customize the Helm chart that deploys ingress-nginx via the following configuration.

      enable_ingress_nginx = true\n\n  ingress_nginx = {\n    name          = \"ingress-nginx\"\n    chart_version = \"4.6.1\"\n    repository    = \"https://kubernetes.github.io/ingress-nginx\"\n    namespace     = \"ingress-nginx\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify ingress-nginx pods are running.

    $ kubectl get pods -n ingress-nginx\nNAME                                       READY   STATUS    RESTARTS   AGE\ningress-nginx-controller-f6c55fdc8-8bt2z   1/1     Running   0          44m\n
    "},{"location":"addons/karpenter/","title":"Karpenter","text":""},{"location":"addons/karpenter/#prerequisites","title":"Prerequisites","text":"

    If deploying a node template that uses Spot instances, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:

    aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true\n
    "},{"location":"addons/karpenter/#validate","title":"Validate","text":"

    The following command will update the kubeconfig on your local machine and allow you to interact with your EKS Cluster using kubectl to validate the Karpenter deployment.

    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing all the pods running currently
    kubectl get pods -n karpenter\n\n# Output should look similar to below\nNAME                         READY   STATUS    RESTARTS   AGE\nkarpenter-6f97df4f77-5nqsk   1/1     Running   0          3m28s\nkarpenter-6f97df4f77-n7fkf   1/1     Running   0          3m28s\n
    1. View the current nodes - this example utilizes EKS Fargate for hosting the Karpenter controller so only Fargate nodes are present currently:
    kubectl get nodes\n\n# Output should look similar to below\nNAME                                                STATUS   ROLES    AGE     VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   2m56s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   2m57s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   2m34s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   2m33s   v1.26.3-eks-f4dc2c0\n
    1. Create a sample pause deployment to demonstrate scaling:
    kubectl apply -f - <<EOF\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: inflate\nspec:\n  replicas: 0\n  selector:\n    matchLabels:\n      app: inflate\n  template:\n    metadata:\n      labels:\n        app: inflate\n    spec:\n      terminationGracePeriodSeconds: 0\n      containers:\n        - name: inflate\n          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7\n          resources:\n            requests:\n              cpu: 1\nEOF\n
    1. Scale up the sample pause deployment to see Karpenter respond by provisioning nodes to support the workload:
    kubectl scale deployment inflate --replicas 5\n# To view logs\n# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller\n
    1. Re-check the nodes; you will now see a new EC2 node provisioned to support the scaled workload:
    kubectl get nodes\n\n# Output should look similar to below\nNAME                                                STATUS   ROLES    AGE     VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   5m15s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   5m16s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   4m53s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   4m52s   v1.26.3-eks-f4dc2c0\nip-10-0-1-184.us-west-2.compute.internal            Ready    <none>   26s     v1.26.2-eks-a59e1f0 # <= new EC2 node launched\n
    1. Remove the sample pause deployment:
    kubectl delete deployment inflate\n
    "},{"location":"addons/kube-prometheus-stack/","title":"Kube Prometheus Stack","text":"

    Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy-to-operate, end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.

    "},{"location":"addons/kube-prometheus-stack/#usage","title":"Usage","text":"

    Kube Prometheus Stack can be deployed by enabling the add-on via the following.

    enable_kube_prometheus_stack = true\n

    You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.

      enable_kube_prometheus_stack = true\n\n  kube_prometheus_stack = {\n    name          = \"kube-prometheus-stack\"\n    chart_version = \"51.2.0\"\n    repository    = \"https://prometheus-community.github.io/helm-charts\"\n    namespace     = \"kube-prometheus-stack\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify kube-prometheus-stack pods are running.

    $ kubectl get pods -n kube-prometheus-stack\nNAME                                                        READY   STATUS    RESTARTS       AGE\nalertmanager-kube-prometheus-stack-alertmanager-0           2/2     Running   3 (2d2h ago)   2d7h\nkube-prometheus-stack-grafana-5c6cf88fd9-8wc9k              3/3     Running   3 (2d2h ago)   2d7h\nkube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d   1/1     Running   1 (2d2h ago)   2d7h\nkube-prometheus-stack-operator-c74ddccb5-8cprr              1/1     Running   1 (2d2h ago)   2d7h\nkube-prometheus-stack-prometheus-node-exporter-vd8lw        1/1     Running   1 (2d2h ago)   2d7h\nprometheus-kube-prometheus-stack-prometheus-0               2/2     Running   2 (2d2h ago)   2d7h\n
    "},{"location":"addons/metrics-server/","title":"Metrics Server","text":"

    Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.

    Metrics Server collects resource metrics from kubelets and exposes them in the Kubernetes API server through the Metrics API for use by the Horizontal Pod Autoscaler and Vertical Pod Autoscaler. The Metrics API can also be accessed via kubectl top, making it easier to debug autoscaling pipelines.
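
    For example, once the add-on is running you can query current node and pod utilization through the Metrics API (namespaces shown are only examples):

    kubectl top nodes\nkubectl top pods -n kube-system\n\n# Both commands read live CPU/memory usage served by metrics-server via the Metrics API\n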

    "},{"location":"addons/metrics-server/#usage","title":"Usage","text":"

    Metrics Server can be deployed by enabling the add-on via the following.

    enable_metrics_server = true\n

    You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.

      enable_metrics_server = true\n\n  metrics_server = {\n    name          = \"metrics-server\"\n    chart_version = \"3.10.0\"\n    repository    = \"https://kubernetes-sigs.github.io/metrics-server/\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify metrics-server pods are running.

    $ kubectl get pods -n kube-system\nNAME                                   READY   STATUS    RESTARTS       AGE\nmetrics-server-6f9cdd486c-njh8b        1/1     Running   1 (2d2h ago)   2d7h\n
    "},{"location":"addons/opa-gatekeeper/","title":"OPA Gatekeeper","text":"

    Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.

    For complete project documentation, please visit the Gatekeeper documentation. For reference templates, refer to Templates.
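
    As an illustrative sketch only, the following Constraint assumes the K8sRequiredLabels ConstraintTemplate from the Gatekeeper library has already been applied; the constraint name and required label are placeholders.

    kubectl apply -f - <<EOF\napiVersion: constraints.gatekeeper.sh/v1beta1\nkind: K8sRequiredLabels\nmetadata:\n  name: ns-must-have-owner\nspec:\n  match:\n    kinds:\n      - apiGroups: [\"\"]\n        kinds: [\"Namespace\"]\n  parameters:\n    labels: [\"owner\"]\nEOF\n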

    "},{"location":"addons/opa-gatekeeper/#usage","title":"Usage","text":"

    Gatekeeper can be deployed by enabling the add-on via the following.

    enable_gatekeeper = true\n

    You can also customize the Helm chart that deploys gatekeeper via the following configuration:

      enable_gatekeeper = true\n\n  gatekeeper = {\n    name          = \"gatekeeper\"\n    chart_version = \"3.12.0\"\n    repository    = \"https://open-policy-agent.github.io/gatekeeper/charts\"\n    namespace     = \"gatekeeper-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n
    "},{"location":"addons/secrets-store-csi-driver-provider-aws/","title":"AWS Secrets Manager and Config Provider for Secret Store CSI Driver","text":"

    AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.
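
    As a minimal sketch (the secret name and namespace below are placeholders), a SecretProviderClass tells the AWS provider which Secrets Manager secrets or Parameter Store parameters to expose; pods then reference it through a CSI volume using the secrets-store.csi.k8s.io driver.

    kubectl apply -f - <<EOF\napiVersion: secrets-store.csi.x-k8s.io/v1\nkind: SecretProviderClass\nmetadata:\n  name: app-secrets\n  namespace: default\nspec:\n  provider: aws\n  parameters:\n    objects: |\n      - objectName: \"my-app/db-credentials\"\n        objectType: \"secretsmanager\"\nEOF\n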

    "},{"location":"addons/secrets-store-csi-driver-provider-aws/#usage","title":"Usage","text":"

    AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.

    enable_secrets_store_csi_driver              = true\nenable_secrets_store_csi_driver_provider_aws = true\n

    You can optionally customize the Helm chart via the following configuration.

      enable_secrets_store_csi_driver              = true\n  enable_secrets_store_csi_driver_provider_aws = true\n\n  secrets_store_csi_driver_provider_aws = {\n    name          = \"secrets-store-csi-driver\"\n    chart_version = \"0.3.2\"\n    repository    = \"https://aws.github.io/secrets-store-csi-driver-provider-aws\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify secrets-store-csi-driver pods are running.

    $ kubectl get pods -n kube-system\nNAME                                         READY   STATUS    RESTARTS       AGE\nsecrets-store-csi-driver-9l2z8               3/3     Running   1 (2d5h ago)   2d9h\nsecrets-store-csi-driver-provider-aws-2qqkk  1/1     Running   1 (2d5h ago)   2d9h\n
    "},{"location":"addons/velero/","title":"Velero","text":"

    Velero is an open source tool to safely back up and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.

    • Helm chart
    • Plugin for AWS
    "},{"location":"addons/velero/#usage","title":"Usage","text":"

    Velero can be deployed by enabling the add-on via the following.

    enable_velero           = true\nvelero_backup_s3_bucket = \"<YOUR_BUCKET_NAME>\"\n\nvelero = {\n  s3_backup_location = \"<YOUR_S3_BUCKET_ARN>[/prefix]\"\n}\n

    You can also customize the Helm chart that deploys velero via the following configuration:

    enable_velero           = true\n\nvelero = {\n  name          = \"velero\"\n  description   = \"A Helm chart for velero\"\n  chart_version = \"3.1.6\"\n  repository    = \"https://vmware-tanzu.github.io/helm-charts/\"\n  namespace     = \"velero\"\n  values        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

    To see a working example, see the stateful example blueprint.

    "},{"location":"addons/velero/#validate","title":"Validate","text":"
    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing velero resources provisioned:
    kubectl get all -n velero\n\n# Output should look similar to below\nNAME                         READY   STATUS    RESTARTS   AGE\npod/velero-7b8994d56-z89sl   1/1     Running   0          25h\n\nNAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE\nservice/velero   ClusterIP   172.20.20.118   <none>        8085/TCP   25h\n\nNAME                     READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/velero   1/1     1            1           25h\n\nNAME                               DESIRED   CURRENT   READY   AGE\nreplicaset.apps/velero-7b8994d56   1         1         1       25h\n
    1. Get backup location using velero CLI
    velero backup-location get\n\n# Output should look similar to below\nNAME      PROVIDER   BUCKET/PREFIX                                 PHASE       LAST VALIDATED                  ACCESS MODE   DEFAULT\ndefault   aws        stateful-20230503175301619800000005/backups   Available   2023-05-04 15:15:00 -0400 EDT   ReadWrite     true\n
    1. To demonstrate creating a backup and restoring it, create a new namespace and run nginx in it using the commands below:
    kubectl create namespace backupdemo\nkubectl run nginx --image=nginx -n backupdemo\n
    1. Create a backup of this namespace using Velero
    velero backup create backup1 --include-namespaces backupdemo\n\n# Output should look similar to below\nBackup request \"backup1\" submitted successfully.\nRun `velero backup describe backup1` or `velero backup logs backup1` for more details.\n
    1. Describe the backup to check the backup status
    velero backup describe backup1\n\n# Output should look similar to below\nName:         backup1\nNamespace:    velero\nLabels:       velero.io/storage-location=default\nAnnotations:  velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0\n              velero.io/source-cluster-k8s-major-version=1\n              velero.io/source-cluster-k8s-minor-version=26+\n\nPhase:  Completed\n\n\nNamespaces:\n  Included:  backupdemo\n  Excluded:  <none>\n\nResources:\n  Included:        *\n  Excluded:        <none>\n  Cluster-scoped:  auto\n\nLabel selector:  <none>\n\nStorage Location:  default\n\nVelero-Native Snapshot PVs:  auto\n\nTTL:  720h0m0s\n\nCSISnapshotTimeout:    10m0s\nItemOperationTimeout:  0s\n\nHooks:  <none>\n\nBackup Format Version:  1.1.0\n\nStarted:    2023-05-04 15:16:31 -0400 EDT\nCompleted:  2023-05-04 15:16:33 -0400 EDT\n\nExpiration:  2023-06-03 15:16:31 -0400 EDT\n\nTotal items to be backed up:  9\nItems backed up:              9\n\nVelero-Native Snapshots: <none included>\n
    1. Delete the namespace - this will be restored using the backup created
    kubectl delete namespace backupdemo\n
    1. Restore the namespace from your backup
    velero restore create --from-backup backup1\n
    1. Verify that the namespace is restored
    kubectl get all -n backupdemo\n\n# Output should look similar to below\nNAME        READY   STATUS    RESTARTS   AGE\npod/nginx   1/1     Running   0          21s\n
    "},{"location":"addons/vertical-pod-autoscaler/","title":"Vertical Pod Autoscaler","text":"

    Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help \"right size\" your applications. When configured, it will automatically request the necessary reservations based on usage, thus allowing proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in the initial container configuration.

    NOTE: The Metrics Server add-on is a dependency for this add-on.
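
    As an illustrative sketch, a VerticalPodAutoscaler object targets an existing workload and lets VPA apply its recommendations automatically; the Deployment name below is a placeholder.

    kubectl apply -f - <<EOF\napiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: my-app\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-app\n  updatePolicy:\n    updateMode: \"Auto\"\nEOF\n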

    "},{"location":"addons/vertical-pod-autoscaler/#usage","title":"Usage","text":"

    This step deploys the Vertical Pod Autoscaler with the default Helm chart configuration.

      enable_vpa            = true\n  enable_metrics_server = true\n

    You can also customize the Helm chart that deploys vpa via the following configuration:

      enable_vpa = true\n  enable_metrics_server = true\n\n  vpa = {\n    name          = \"vpa\"\n    chart_version = \"1.7.5\"\n    repository    = \"https://charts.fairwinds.com/stable\"\n    namespace     = \"vpa\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n
    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":""},{"location":"#amazon-eks-blueprints-addons","title":"Amazon EKS Blueprints Addons","text":"

    Terraform module to deploy Kubernetes addons on Amazon EKS clusters.

    "},{"location":"#usage","title":"Usage","text":"
    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.0\" #ensure to update this to the latest/desired version\n\n  cluster_name      = module.eks.cluster_name\n  cluster_endpoint  = module.eks.cluster_endpoint\n  cluster_version   = module.eks.cluster_version\n  oidc_provider_arn = module.eks.oidc_provider_arn\n\n  eks_addons = {\n    aws-ebs-csi-driver = {\n      most_recent = true\n    }\n    coredns = {\n      most_recent = true\n    }\n    vpc-cni = {\n      most_recent = true\n    }\n    kube-proxy = {\n      most_recent = true\n    }\n  }\n\n  enable_aws_load_balancer_controller    = true\n  enable_cluster_proportional_autoscaler = true\n  enable_karpenter                       = true\n  enable_kube_prometheus_stack           = true\n  enable_metrics_server                  = true\n  enable_external_dns                    = true\n  enable_cert_manager                    = true\n  cert_manager_route53_hosted_zone_arns  = [\"arn:aws:route53:::hostedzone/XXXXXXXXXXXXX\"]\n\n  tags = {\n    Environment = \"dev\"\n  }\n}\n\nmodule \"eks\" {\n  source = \"terraform-aws-modules/eks/aws\"\n\n  cluster_name    = \"my-cluster\"\n  cluster_version = \"1.29\"\n\n  ... truncated for brevity\n}\n
    "},{"location":"#requirements","title":"Requirements","text":"Name Version terraform >= 1.0 aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#providers","title":"Providers","text":"Name Version aws >= 5.0 helm >= 2.9 kubernetes >= 2.20 time >= 0.9"},{"location":"#modules","title":"Modules","text":"Name Source Version argo_events aws-ia/eks-blueprints-addon/aws 1.1.1 argo_rollouts aws-ia/eks-blueprints-addon/aws 1.1.1 argo_workflows aws-ia/eks-blueprints-addon/aws 1.1.1 argocd aws-ia/eks-blueprints-addon/aws 1.1.1 aws_cloudwatch_metrics aws-ia/eks-blueprints-addon/aws 1.1.1 aws_efs_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 aws_for_fluentbit aws-ia/eks-blueprints-addon/aws 1.1.1 aws_fsx_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 aws_gateway_api_controller aws-ia/eks-blueprints-addon/aws 1.1.1 aws_load_balancer_controller aws-ia/eks-blueprints-addon/aws 1.1.1 aws_node_termination_handler aws-ia/eks-blueprints-addon/aws 1.1.1 aws_node_termination_handler_sqs terraform-aws-modules/sqs/aws 4.0.1 aws_privateca_issuer aws-ia/eks-blueprints-addon/aws 1.1.1 bottlerocket_shadow aws-ia/eks-blueprints-addon/aws ~> 1.1.1 bottlerocket_update_operator aws-ia/eks-blueprints-addon/aws ~> 1.1.1 cert_manager aws-ia/eks-blueprints-addon/aws 1.1.1 cluster_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.1 cluster_proportional_autoscaler aws-ia/eks-blueprints-addon/aws 1.1.1 external_dns aws-ia/eks-blueprints-addon/aws 1.1.1 external_secrets aws-ia/eks-blueprints-addon/aws 1.1.1 gatekeeper aws-ia/eks-blueprints-addon/aws 1.1.1 ingress_nginx aws-ia/eks-blueprints-addon/aws 1.1.1 karpenter aws-ia/eks-blueprints-addon/aws 1.1.1 karpenter_sqs terraform-aws-modules/sqs/aws 4.0.1 kube_prometheus_stack aws-ia/eks-blueprints-addon/aws 1.1.1 metrics_server aws-ia/eks-blueprints-addon/aws 1.1.1 secrets_store_csi_driver aws-ia/eks-blueprints-addon/aws 1.1.1 secrets_store_csi_driver_provider_aws aws-ia/eks-blueprints-addon/aws 1.1.1 velero aws-ia/eks-blueprints-addon/aws 1.1.1 vpa aws-ia/eks-blueprints-addon/aws 1.1.1"},{"location":"#resources","title":"Resources","text":"Name Type aws_autoscaling_group_tag.aws_node_termination_handler resource aws_autoscaling_lifecycle_hook.aws_node_termination_handler resource aws_cloudwatch_event_rule.aws_node_termination_handler resource aws_cloudwatch_event_rule.karpenter resource aws_cloudwatch_event_target.aws_node_termination_handler resource aws_cloudwatch_event_target.karpenter resource aws_cloudwatch_log_group.aws_for_fluentbit resource aws_cloudwatch_log_group.fargate_fluentbit resource aws_eks_addon.this resource aws_iam_instance_profile.karpenter resource aws_iam_policy.fargate_fluentbit resource aws_iam_role.karpenter resource aws_iam_role_policy_attachment.additional resource aws_iam_role_policy_attachment.karpenter resource helm_release.this resource kubernetes_config_map_v1.aws_logging resource kubernetes_config_map_v1_data.aws_for_fluentbit_containerinsights resource kubernetes_namespace_v1.aws_observability resource time_sleep.this resource aws_caller_identity.current data source aws_eks_addon_version.this data source aws_iam_policy_document.aws_efs_csi_driver data source aws_iam_policy_document.aws_for_fluentbit data source aws_iam_policy_document.aws_fsx_csi_driver data source aws_iam_policy_document.aws_gateway_api_controller data source aws_iam_policy_document.aws_load_balancer_controller data source aws_iam_policy_document.aws_node_termination_handler data source aws_iam_policy_document.aws_privateca_issuer data source 
aws_iam_policy_document.cert_manager data source aws_iam_policy_document.cluster_autoscaler data source aws_iam_policy_document.external_dns data source aws_iam_policy_document.external_secrets data source aws_iam_policy_document.fargate_fluentbit data source aws_iam_policy_document.karpenter data source aws_iam_policy_document.karpenter_assume_role data source aws_iam_policy_document.velero data source aws_partition.current data source aws_region.current data source"},{"location":"#inputs","title":"Inputs","text":"Name Description Type Default Required argo_events Argo Events add-on configuration values any {} no argo_rollouts Argo Rollouts add-on configuration values any {} no argo_workflows Argo Workflows add-on configuration values any {} no argocd ArgoCD add-on configuration values any {} no aws_cloudwatch_metrics Cloudwatch Metrics add-on configuration values any {} no aws_efs_csi_driver EFS CSI Driver add-on configuration values any {} no aws_for_fluentbit AWS Fluentbit add-on configurations any {} no aws_for_fluentbit_cw_log_group AWS Fluentbit CloudWatch Log Group configurations any {} no aws_fsx_csi_driver FSX CSI Driver add-on configuration values any {} no aws_gateway_api_controller AWS Gateway API Controller add-on configuration values any {} no aws_load_balancer_controller AWS Load Balancer Controller add-on configuration values any {} no aws_node_termination_handler AWS Node Termination Handler add-on configuration values any {} no aws_node_termination_handler_asg_arns List of Auto Scaling group ARNs that AWS Node Termination Handler will monitor for EC2 events list(string) [] no aws_node_termination_handler_sqs AWS Node Termination Handler SQS queue configuration values any {} no aws_privateca_issuer AWS PCA Issuer add-on configurations any {} no bottlerocket_shadow Bottlerocket Update Operator CRDs configuration values any {} no bottlerocket_update_operator Bottlerocket Update Operator add-on configuration values any {} no cert_manager cert-manager add-on configuration values any {} no cert_manager_route53_hosted_zone_arns List of Route53 Hosted Zone ARNs that are used by cert-manager to create DNS records list(string)
    [  \"arn:aws:route53:::hostedzone/*\"]
    no cluster_autoscaler Cluster Autoscaler add-on configuration values any {} no cluster_endpoint Endpoint for your Kubernetes API server string n/a yes cluster_name Name of the EKS cluster string n/a yes cluster_proportional_autoscaler Cluster Proportional Autoscaler add-on configurations any {} no cluster_version Kubernetes <major>.<minor> version to use for the EKS cluster (i.e.: 1.24) string n/a yes create_delay_dependencies Dependency attribute which must be resolved before starting the create_delay_duration list(string) [] no create_delay_duration The duration to wait before creating resources string \"30s\" no create_kubernetes_resources Create Kubernetes resource with Helm or Kubernetes provider bool true no eks_addons Map of EKS add-on configurations to enable for the cluster. Add-on name can be the map keys or set with name any {} no eks_addons_timeouts Create, update, and delete timeout configurations for the EKS add-ons map(string) {} no enable_argo_events Enable Argo Events add-on bool false no enable_argo_rollouts Enable Argo Rollouts add-on bool false no enable_argo_workflows Enable Argo workflows add-on bool false no enable_argocd Enable Argo CD Kubernetes add-on bool false no enable_aws_cloudwatch_metrics Enable AWS Cloudwatch Metrics add-on for Container Insights bool false no enable_aws_efs_csi_driver Enable AWS EFS CSI Driver add-on bool false no enable_aws_for_fluentbit Enable AWS for FluentBit add-on bool false no enable_aws_fsx_csi_driver Enable AWS FSX CSI Driver add-on bool false no enable_aws_gateway_api_controller Enable AWS Gateway API Controller add-on bool false no enable_aws_load_balancer_controller Enable AWS Load Balancer Controller add-on bool false no enable_aws_node_termination_handler Enable AWS Node Termination Handler add-on bool false no enable_aws_privateca_issuer Enable AWS PCA Issuer bool false no enable_bottlerocket_update_operator Enable Bottlerocket Update Operator add-on bool false no enable_cert_manager Enable cert-manager add-on bool false no enable_cluster_autoscaler Enable Cluster autoscaler add-on bool false no enable_cluster_proportional_autoscaler Enable Cluster Proportional Autoscaler bool false no enable_eks_fargate Identifies whether or not respective addons should be modified to support deployment on EKS Fargate bool false no enable_external_dns Enable external-dns operator add-on bool false no enable_external_secrets Enable External Secrets operator add-on bool false no enable_fargate_fluentbit Enable Fargate FluentBit add-on bool false no enable_gatekeeper Enable Gatekeeper add-on bool false no enable_ingress_nginx Enable Ingress Nginx bool false no enable_karpenter Enable Karpenter controller add-on bool false no enable_kube_prometheus_stack Enable Kube Prometheus Stack bool false no enable_metrics_server Enable metrics server add-on bool false no enable_secrets_store_csi_driver Enable CSI Secrets Store Provider bool false no enable_secrets_store_csi_driver_provider_aws Enable AWS CSI Secrets Store Provider bool false no enable_velero Enable Kubernetes Dashboard add-on bool false no enable_vpa Enable Vertical Pod Autoscaler add-on bool false no external_dns external-dns add-on configuration values any {} no external_dns_route53_zone_arns List of Route53 zones ARNs which external-dns will have access to create/manage records (if using Route53) list(string) [] no external_secrets External Secrets add-on configuration values any {} no external_secrets_kms_key_arns List of KMS Key ARNs that are used by Secrets Manager that contain 
secrets to mount using External Secrets list(string)
    [  \"arn:aws:kms:::key/*\"]
    no external_secrets_secrets_manager_arns List of Secrets Manager ARNs that contain secrets to mount using External Secrets list(string)
    [  \"arn:aws:secretsmanager:::secret:*\"]
    no external_secrets_ssm_parameter_arns List of Systems Manager Parameter ARNs that contain secrets to mount using External Secrets list(string)
    [  \"arn:aws:ssm:::parameter/*\"]
    no fargate_fluentbit Fargate fluentbit add-on config any {} no fargate_fluentbit_cw_log_group AWS Fargate Fluentbit CloudWatch Log Group configurations any {} no gatekeeper Gatekeeper add-on configuration any {} no helm_releases A map of Helm releases to create. This provides the ability to pass in an arbitrary map of Helm chart definitions to create any {} no ingress_nginx Ingress Nginx add-on configurations any {} no karpenter Karpenter add-on configuration values any {} no karpenter_enable_instance_profile_creation Determines whether Karpenter will be allowed to create the IAM instance profile (v1beta1) or if Terraform will (v1alpha1) bool true no karpenter_enable_spot_termination Determines whether to enable native node termination handling bool true no karpenter_node Karpenter IAM role and IAM instance profile configuration values any {} no karpenter_sqs Karpenter SQS queue for native node termination handling configuration values any {} no kube_prometheus_stack Kube Prometheus Stack add-on configurations any {} no metrics_server Metrics Server add-on configurations any {} no oidc_provider_arn The ARN of the cluster OIDC Provider string n/a yes secrets_store_csi_driver CSI Secrets Store Provider add-on configurations any {} no secrets_store_csi_driver_provider_aws CSI Secrets Store Provider add-on configurations any {} no tags A map of tags to add to all resources map(string) {} no velero Velero add-on configuration values any {} no vpa Vertical Pod Autoscaler add-on configuration values any {} no"},{"location":"#outputs","title":"Outputs","text":"Name Description argo_events Map of attributes of the Helm release created argo_rollouts Map of attributes of the Helm release created argo_workflows Map of attributes of the Helm release created argocd Map of attributes of the Helm release created aws_cloudwatch_metrics Map of attributes of the Helm release and IRSA created aws_efs_csi_driver Map of attributes of the Helm release and IRSA created aws_for_fluentbit Map of attributes of the Helm release and IRSA created aws_fsx_csi_driver Map of attributes of the Helm release and IRSA created aws_gateway_api_controller Map of attributes of the Helm release and IRSA created aws_load_balancer_controller Map of attributes of the Helm release and IRSA created aws_node_termination_handler Map of attributes of the Helm release and IRSA created aws_privateca_issuer Map of attributes of the Helm release and IRSA created bottlerocket_update_operator Map of attributes of the Helm release and IRSA created cert_manager Map of attributes of the Helm release and IRSA created cluster_autoscaler Map of attributes of the Helm release and IRSA created cluster_proportional_autoscaler Map of attributes of the Helm release and IRSA created eks_addons Map of attributes for each EKS addons enabled external_dns Map of attributes of the Helm release and IRSA created external_secrets Map of attributes of the Helm release and IRSA created fargate_fluentbit Map of attributes of the configmap and IAM policy created gatekeeper Map of attributes of the Helm release and IRSA created gitops_metadata GitOps Bridge metadata helm_releases Map of attributes of the Helm release created ingress_nginx Map of attributes of the Helm release and IRSA created karpenter Map of attributes of the Helm release and IRSA created kube_prometheus_stack Map of attributes of the Helm release and IRSA created metrics_server Map of attributes of the Helm release and IRSA created secrets_store_csi_driver Map of attributes of the Helm release 
and IRSA created secrets_store_csi_driver_provider_aws Map of attributes of the Helm release and IRSA created velero Map of attributes of the Helm release and IRSA created vpa Map of attributes of the Helm release and IRSA created"},{"location":"amazon-eks-addons/","title":"Amazon EKS Add-ons","text":"

    The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API, whether native EKS add-ons or third-party add-ons supplied via the AWS Marketplace.

    See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as Additional Amazon EKS add-ons from independent software vendors.

    "},{"location":"amazon-eks-addons/#architecture-support","title":"Architecture Support","text":"

    The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.

    Add-on x86_64/amd64 arm64 vpc-cni \u2705 \u2705 aws-ebs-csi-driver \u2705 \u2705 coredns \u2705 \u2705 kube-proxy \u2705 \u2705 adot \u2705 \u2705 aws-guardduty-agent \u2705 \u2705"},{"location":"amazon-eks-addons/#usage","title":"Usage","text":"

    The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons argument which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:

    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n    <key> = {\n      name = string # Optional - <key> is used if `name` is not set\n\n      most_recent          = bool\n      addon_version        = string # overrides `most_recent` if set\n      configuration_values = string # JSON string\n\n      preserve                    = bool # defaults to `true`\n      resolve_conflicts_on_create = string # defaults to `OVERWRITE`\n      resolve_conflicts_on_update = string # defaults to `OVERWRITE`\n\n      timeouts = {\n        create = string # optional\n        update = string # optional\n        delete = string # optional\n      }\n\n      tags = map(string)\n    }\n  }\n}\n
    "},{"location":"amazon-eks-addons/#example","title":"Example","text":"
    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n    # Amazon EKS add-ons\n    aws-ebs-csi-driver = {\n      most_recent              = true\n      service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn\n    }\n\n    coredns = {\n      most_recent = true\n\n      timeouts = {\n        create = \"25m\"\n        delete = \"10m\"\n      }\n    }\n\n    vpc-cni = {\n      most_recent              = true\n      service_account_role_arn = module.vpc_cni_irsa.iam_role_arn\n    }\n\n    kube-proxy = {}\n\n    # Third party add-ons via AWS Marketplace\n    kubecost_kubecost = {\n      most_recent = true\n    }\n\n    teleport_teleport = {\n      most_recent = true\n    }\n  }\n}\n
    "},{"location":"amazon-eks-addons/#configuration-values","title":"Configuration Values","text":"

    You can supply custom configuration values to each add-on via the configuration_values argument of the add-on definition. The value provided must be a JSON-encoded string and adhere to the JSON schema provided by the version of the add-on. You can view this schema using the AWS CLI by supplying the add-on name and version to the describe-addon-configuration command:

    aws eks describe-addon-configuration \\\n--addon-name coredns \\\n--addon-version v1.10.1-eksbuild.2 \\\n--query 'configurationSchema' \\\n--output text | jq\n

    Which returns the formatted JSON schema like below:

    {\n  \"$ref\": \"#/definitions/Coredns\",\n  \"$schema\": \"http://json-schema.org/draft-06/schema#\",\n  \"definitions\": {\n    \"Coredns\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"affinity\": {\n          \"default\": {\n            \"affinity\": {\n              \"nodeAffinity\": {\n                \"requiredDuringSchedulingIgnoredDuringExecution\": {\n                  \"nodeSelectorTerms\": [\n                    {\n                      \"matchExpressions\": [\n                        {\n                          \"key\": \"kubernetes.io/os\",\n                          \"operator\": \"In\",\n                          \"values\": [\n                            \"linux\"\n                          ]\n                        },\n                        {\n                          \"key\": \"kubernetes.io/arch\",\n                          \"operator\": \"In\",\n                          \"values\": [\n                            \"amd64\",\n                            \"arm64\"\n                          ]\n                        }\n                      ]\n                    }\n                  ]\n                }\n              },\n              \"podAntiAffinity\": {\n                \"preferredDuringSchedulingIgnoredDuringExecution\": [\n                  {\n                    \"podAffinityTerm\": {\n                      \"labelSelector\": {\n                        \"matchExpressions\": [\n                          {\n                            \"key\": \"k8s-app\",\n                            \"operator\": \"In\",\n                            \"values\": [\n                              \"kube-dns\"\n                            ]\n                          }\n                        ]\n                      },\n                      \"topologyKey\": \"kubernetes.io/hostname\"\n                    },\n                    \"weight\": 100\n                  }\n                ]\n              }\n            }\n          },\n          \"description\": \"Affinity of the coredns pods\",\n          \"type\": [\n            \"object\",\n            \"null\"\n          ]\n        },\n        \"computeType\": {\n          \"type\": \"string\"\n        },\n        \"corefile\": {\n          \"description\": \"Entire corefile contents to use with installation\",\n          \"type\": \"string\"\n        },\n        \"nodeSelector\": {\n          \"additionalProperties\": {\n            \"type\": \"string\"\n          },\n          \"type\": \"object\"\n        },\n        \"replicaCount\": {\n          \"type\": \"integer\"\n        },\n        \"resources\": {\n          \"$ref\": \"#/definitions/Resources\"\n        },\n        \"tolerations\": {\n          \"default\": [\n            {\n              \"key\": \"CriticalAddonsOnly\",\n              \"operator\": \"Exists\"\n            },\n            {\n              \"key\": \"node-role.kubernetes.io/master\",\n              \"operator\": \"NoSchedule\"\n            }\n          ],\n          \"description\": \"Tolerations of the coredns pod\",\n          \"items\": {\n            \"type\": \"object\"\n          },\n          \"type\": \"array\"\n        },\n        \"topologySpreadConstraints\": {\n          \"description\": \"The coredns pod topology spread constraints\",\n          \"type\": \"array\"\n        }\n      },\n      \"title\": \"Coredns\",\n      \"type\": \"object\"\n    },\n    \"Limits\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        
\"cpu\": {\n          \"type\": \"string\"\n        },\n        \"memory\": {\n          \"type\": \"string\"\n        }\n      },\n      \"title\": \"Limits\",\n      \"type\": \"object\"\n    },\n    \"Resources\": {\n      \"additionalProperties\": false,\n      \"properties\": {\n        \"limits\": {\n          \"$ref\": \"#/definitions/Limits\"\n        },\n        \"requests\": {\n          \"$ref\": \"#/definitions/Limits\"\n        }\n      },\n      \"title\": \"Resources\",\n      \"type\": \"object\"\n    }\n  }\n}\n

    You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode() function as shown below:

    module \"eks_blueprints_addons\" {\n  source = \"aws-ia/eks-blueprints-addons/aws\"\n\n  # ... truncated for brevity\n\n  eks_addons = {\n     coredns = {\n      most_recent = true\n      configuration_values = jsonencode({\n        replicaCount = 4\n        tolerations = [\n        {\n          key      = \"dedicated\",\n          operator = \"Equal\",\n          effect   = \"NoSchedule\",\n          value    = \"orchestration-seb\"\n        }\n        ]\n\n        topologySpreadConstraints = [\n          {\n            maxSkew = 1\n            topologyKey = \"topology.kubernetes.io/zone\"\n            whenUnsatisfiable = \"ScheduleAnyway\"\n            labelSelector = {\n              matchLabels = {\n                k8s-app: \"kube-dns\"\n              }\n            }\n          }\n        ]\n\n        affinity = {\n          nodeAffinity = {\n            requiredDuringSchedulingIgnoredDuringExecution = {\n              nodeSelectorTerms = [\n              {\n                matchExpressions = [\n                  {\n                    key = \"kubernetes.io/os\"\n                    operator = \"In\"\n                    values = [\"linux\"]\n                  },\n                  {\n                    key = \"kubernetes.io/arch\"\n                    operator = \"In\"\n                    values = [\"amd64\"]\n                  }\n                ]\n              }]\n            }\n          }\n\n          podAffinity = {\n            requiredDuringSchedulingIgnoredDuringExecution = [{\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"NotIn\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n            }\n            ]\n          }\n\n          podAntiAffinity = {\n            preferredDuringSchedulingIgnoredDuringExecution = [{\n              podAffinityTerm = {\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"In\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n              }\n              weight = 100\n              }\n            ]\n\n            requiredDuringSchedulingIgnoredDuringExecution = [{\n                labelSelector = {\n                  matchExpressions = [\n                    {\n                      key = \"k8s-app\"\n                      operator = \"In\"\n                      values = [\"kube-dns\"]\n                    }\n                  ]\n                }\n                topologyKey = \"kubernetes.io/hostname\"\n              }\n            ]\n          }\n\n        }\n\n        resources = {\n          limits = {\n            cpu    = \"100m\"\n            memory = \"150Mi\"\n          }\n          requests = {\n            cpu    = \"100m\"\n            memory = \"150Mi\"\n        }\n      })\n    }\n
    "},{"location":"architectures/","title":"Architectures","text":""},{"location":"architectures/#addons","title":"Addons","text":"Addon x86_64/amd64 arm64 Argo Rollouts \u2705 \u2705 Argo Workflows \u2705 \u2705 Argo CD \u2705 \u2705 AWS CloudWatch Metrics \u2705 \u2705 AWS EFS CSI Driver \u2705 \u2705 AWS for FluentBit \u2705 \u2705 AWS FSx CSI Driver \u2705 \u2705 AWS Load Balancer Controller \u2705 \u2705 AWS Node Termination Handler \u2705 \u2705 AWS Private CA Issuer \u2705 \u2705 Cert Manager \u2705 \u2705 Cluster Autoscaler \u2705 \u2705 Cluster Proportional Autoscaler \u2705 \u2705 External DNS \u2705 \u2705 External Secrets \u2705 \u2705 OPA Gatekeeper \u2705 \u2705 Ingress Nginx \u2705 \u2705 Karpenter \u2705 \u2705 Kube-Prometheus Stack \u2705 \u2705 Metrics Server \u2705 \u2705 Secrets Store CSI Driver \u2705 \u2705 Secrets Store CSI Driver Provider AWS \u2705 \u2705 Velero \u2705 \u2705 Vertical Pod Autoscaler \u2705 \u2705"},{"location":"architectures/#amazon-eks-addons","title":"Amazon EKS Addons","text":"

    The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These addons are specified via the eks_addons input variable.

    Addon x86_64/amd64 arm64 AWS VPC CNI \u2705 \u2705 AWS EBS CSI Driver \u2705 \u2705 CoreDNS \u2705 \u2705 Kube-proxy \u2705 \u2705 ADOT Collector \u2705 \u2705 AWS GuardDuty Agent \u2705 \u2705"},{"location":"aws-partner-addons/","title":"AWS Partner Addons","text":"

    The following addons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information on the addon, its supported configuration values, as well as questions, comments, and feature requests.

    Addon Description Ondat Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. Hashicorp - Consul Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. Hashicorp - Vault Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. Sysdig Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. Tetrate Istio Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms. NetApp ONTAP Astra Trident NetApp's Astra Trident provides dynamic storage orchestration for FSx for NetApp ONTAP using a Container Storage Interface (CSI) compliant driver. Kong Konnect - Kong Gateway Kong Gateway is the fastest and most adopted API gateway that integrates with Kong Konnect, the end-to-end SaaS API lifecycle management platform. Kong Konnect - Kong Ingress Controller Kong Ingress Controller combines the powerful features of the widely popular Kong Gateway with Kubernetes in a truly Kubernetes-native manner and now integrated with Kong Konnect, the end-to-end SaaS API lifecycle management platform. Kong Konnect - Kong Mesh Manager Kong Mesh is the most flexible, enterprise-proven, service-to-service connectivity solution for developing modern applications that drive compelling digital experiences for an organization\u2019s customers. CloudBees CI CloudBees CI is a highly scalable, resilient, and flexible continuous integration (CI) solution based on the popular Jenkins build orchestration tool. It provides a centralized, shared, and self-service experience tailored for all development teams utilizing Jenkins."},{"location":"helm-release/","title":"Helm Release Add-ons","text":"

    Starting with EKS Blueprints v5, we have made the decision to support provisioning only a certain core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically, you can expect that any AWS-created add-on that is not yet available via the Amazon EKS add-ons will be prioritized to be provisioned through this repository.

    In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g. metrics-server) and certain patterns (gitops) that are foundational elements for a complete blueprint of an EKS cluster.

    One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they often want to provision add-ons that are not necessarily supported by EKS Blueprints. For such add-ons, the options are as follows:

    "},{"location":"helm-release/#with-helm_release-terraform-resource","title":"With helm_release Terraform Resource","text":"

    The helm_release resource is the most fundamental way to provision a Helm chart via Terraform.

    Use this resource if you need to control the lifecycle of add-ons down to the level of each add-on resource.
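
    For example, a minimal helm_release sketch might look like the following; the chart, repository, version, and values shown are placeholders for whichever add-on you need.

    resource \"helm_release\" \"nginx\" {\n  name             = \"nginx\"\n  repository       = \"https://charts.bitnami.com/bitnami\" # placeholder repository\n  chart            = \"nginx\"                               # placeholder chart\n  version          = \"15.0.0\"                              # placeholder version\n  namespace        = \"nginx\"\n  create_namespace = true\n\n  set {\n    name  = \"replicaCount\"\n    value = \"2\"\n  }\n}\n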

    "},{"location":"helm-release/#with-helm_releases-variable","title":"With helm_releases Variable","text":"

    You can use the helm_releases variable in EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we just iterate through the provided map and pass each configuration to the Terraform helm_release resource.

    E.g.

    module \"addons\" {\n  source  = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.0\"\n\n  cluster_name      = \"<cluster_name>\"\n  cluster_endpoint  = \"<cluster_endpoint>\"\n  cluster_version   = \"<cluster_version>\"\n  oidc_provider_arn = \"<oidc_provider_arn>\"\n\n  # EKS add-ons\n  eks_addons = {\n    coredns = {}\n    vpc-cni = {}\n    kube-proxy = {}\n  }\n\n  # Blueprints add-ons\n  enable_aws_efs_csi_driver                    = true\n  enable_aws_cloudwatch_metrics                = true\n  enable_cert_manager                          = true\n  ...\n\n  # Pass in any number of Helm charts to be created for those that are not natively supported\n  helm_releases = {\n    prometheus-adapter = {\n      description      = \"A Helm chart for k8s prometheus adapter\"\n      namespace        = \"prometheus-adapter\"\n      create_namespace = true\n      chart            = \"prometheus-adapter\"\n      chart_version    = \"4.2.0\"\n      repository       = \"https://prometheus-community.github.io/helm-charts\"\n      values = [\n        <<-EOT\n          replicas: 2\n          podDisruptionBudget:\n            enabled: true\n        EOT\n      ]\n    }\n    gpu-operator = {\n      description      = \"A Helm chart for NVIDIA GPU operator\"\n      namespace        = \"gpu-operator\"\n      create_namespace = true\n      chart            = \"gpu-operator\"\n      chart_version    = \"v23.3.2\"\n      repository       = \"https://nvidia.github.io/gpu-operator\"\n      values = [\n        <<-EOT\n          operator:\n            defaultRuntime: containerd\n        EOT\n      ]\n    }\n  }\n\n  tags = local.tags\n}\n

    With this pattern, the lifecycle of all your add-ons is tied to that of the addons module. This allows you to easily target the addon module in your Terraform apply and destroy commands. E.g.

    terraform apply -target=module.addons\n\nterraform destroy -target=module.addons\n
    "},{"location":"helm-release/#with-eks-blueprints-addon-module","title":"With EKS Blueprints Addon Module","text":"

    If you have an add-on that requires an IAM Role for Service Accounts (IRSA), we have created a new Terraform module terraform-aws-eks-blueprints-addon that can help provision a Helm chart along with an IAM role and policies with permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.

    You can optionally use this module for add-ons that do not need IRSA or even just to create the IAM resources for IRSA and skip the helm release. Detailed usage of how to consume this module can be found in its readme.
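
    As a hedged sketch only (the chart, repository, policy, and service account names below are placeholders, and the module's readme documents the exact inputs), provisioning a Helm chart together with an IRSA role might look like:

    module \"custom_addon\" {\n  source  = \"aws-ia/eks-blueprints-addon/aws\"\n  version = \"~> 1.1\"\n\n  chart            = \"my-addon\"                        # placeholder chart name\n  chart_version    = \"1.0.0\"\n  repository       = \"https://example.com/helm-charts\" # placeholder repository\n  namespace        = \"my-addon\"\n  create_namespace = true\n\n  # IRSA: create an IAM role, attach policies, and bind it to the chart's service account\n  create_role = true\n  role_policies = {\n    my_addon = aws_iam_policy.my_addon.arn # placeholder policy\n  }\n  oidc_providers = {\n    this = {\n      provider_arn    = module.eks.oidc_provider_arn\n      service_account = \"my-addon-sa\"\n    }\n  }\n}\n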

    This pattern can be used to create a Terraform module with a set of add-ons that are not supported in the EKS Blueprints Add-ons today and wrap them in the same module definition. An example of this is the ACK add-ons repository which is a collection of ACK helm chart deployments with IRSA for each of the ACK controllers.

    "},{"location":"addons/argo-events/","title":"Argo Events","text":"

    Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).

    "},{"location":"addons/argo-events/#usage","title":"Usage","text":"

    Argo Events can be deployed by enabling the add-on via the following.

    enable_argo_events = true\n

    You can optionally customize the Helm chart that deploys Argo Events via the following configuration.

      enable_argo_events = true\n\n  argo_events = {\n    name          = \"argo-events\"\n    chart_version = \"2.4.0\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-events\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-events pods are running.

    $ kubectl get pods -n argo-events\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-events-controller-manager-bfb894cdb-k8hzn        1/1     Running   0          11m\n
    "},{"location":"addons/argo-rollouts/","title":"Argo Rollouts","text":"

    Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.

    "},{"location":"addons/argo-rollouts/#usage","title":"Usage","text":"

    Argo Rollouts can be deployed by enabling the add-on via the following.

    enable_argo_rollouts = true\n

    You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.

      enable_argo_rollouts = true\n\n  argo_rollouts = {\n    name          = \"argo-rollouts\"\n    chart_version = \"2.22.3\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-rollouts\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-rollouts pods are running.

    $ kubectl get pods -n argo-rollouts\nNAME                             READY   STATUS    RESTARTS   AGE\nargo-rollouts-5db5688849-x89zb   0/1     Running   0          11s\n
    "},{"location":"addons/argo-workflows/","title":"Argo Workflows","text":"

    Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).

    "},{"location":"addons/argo-workflows/#usage","title":"Usage","text":"

    Argo Workflows can be deployed by enabling the add-on via the following.

    enable_argo_workflows = true\n

    You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.

      enable_argo_workflows = true\n\n  argo_workflows = {\n    name          = \"argo-workflows\"\n    chart_version = \"0.28.2\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argo-workflows\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argo-workflows pods are running.

    $ kubectl get pods -n argo-workflows\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-workflows-server-68988cd864-22zhr                1/1     Running   0          6m32s\nargo-workflows-workflow-controller-7ff7b5658d-9q44f   1/1     Running   0          6m32s\n
    "},{"location":"addons/argocd/","title":"Argo CD","text":"

    Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.

    "},{"location":"addons/argocd/#usage","title":"Usage","text":"

    Argo CD can be deployed by enabling the add-on via the following.

    enable_argocd = true\n

    You can optionally customize the Helm chart that deploys Argo CD via the following configuration.

      enable_argocd = true\n\n  argocd = {\n    name          = \"argocd\"\n    chart_version = \"5.29.1\"\n    repository    = \"https://argoproj.github.io/argo-helm\"\n    namespace     = \"argocd\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify argocd pods are running.

    $ kubectl get pods -n argocd\nNAME                                                        READY   STATUS    RESTARTS   AGE\nargo-cd-argocd-application-controller-0                     1/1     Running   0          146m\nargo-cd-argocd-applicationset-controller-678d85f77b-rmpcb   1/1     Running   0          146m\nargo-cd-argocd-dex-server-7b6c9b5969-zpqnl                  1/1     Running   0          146m\nargo-cd-argocd-notifications-controller-6d489b99c9-j6fdw    1/1     Running   0          146m\nargo-cd-argocd-redis-59dd95f5b5-8fx74                       1/1     Running   0          146m\nargo-cd-argocd-repo-server-7b9bd88c95-mh2fz                 1/1     Running   0          146m\nargo-cd-argocd-server-6f9cfdd4d5-8mfpc                      1/1     Running   0          146m\n
    "},{"location":"addons/aws-cloudwatch-metrics/","title":"AWS CloudWatch Metrics","text":"

    Use AWS CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

    Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.
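
    For illustration, a performance log event in embedded metric format is a structured JSON document roughly like the following; the cluster name, node name, and values are placeholders.

    {\n  \"_aws\": {\n    \"Timestamp\": 1711570000000,\n    \"CloudWatchMetrics\": [\n      {\n        \"Namespace\": \"ContainerInsights\",\n        \"Dimensions\": [[\"ClusterName\", \"NodeName\"]],\n        \"Metrics\": [{ \"Name\": \"node_cpu_utilization\", \"Unit\": \"Percent\" }]\n      }\n    ]\n  },\n  \"ClusterName\": \"my-cluster\",\n  \"NodeName\": \"ip-10-0-1-10.us-west-2.compute.internal\",\n  \"node_cpu_utilization\": 4.3\n}\n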

    "},{"location":"addons/aws-cloudwatch-metrics/#usage","title":"Usage","text":"

    AWS CloudWatch Metrics can be deployed by enabling the add-on via the following.

    enable_aws_cloudwatch_metrics = true\n

    You can also customize the Helm chart that deploys aws-cloudwatch-metrics via the following configuration:

      enable_aws_cloudwatch_metrics        = true\n\n  aws_cloudwatch_metrics_irsa_policies = [\"IAM Policies\"]\n  aws_cloudwatch_metrics   = {\n    role_policies = [\"IAM Policies\"]  # extra policies in addition to CloudWatchAgentServerPolicy\n    name          = \"aws-cloudwatch-metrics\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    chart_version = \"0.0.9\"\n    namespace     = \"amazon-cloudwatch\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here\n  }\n

    Verify aws-cloudwatch-metrics pods are running.

    $ kubectl get pods -n amazon-cloudwatch\n\nNAME                           READY   STATUS    RESTARTS   AGE\naws-cloudwatch-metrics-2dt5h   1/1     Running   0          149m\n
    "},{"location":"addons/aws-efs-csi-driver/","title":"AWS EFS CSI Driver","text":"

    This add-on deploys the AWS EFS CSI driver into an EKS cluster.

    "},{"location":"addons/aws-efs-csi-driver/#usage","title":"Usage","text":"

    The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

      enable_aws_efs_csi_driver = true\n

    You can optionally customize the Helm chart that deploys the driver via the following configuration.

      enable_aws_efs_csi_driver = true\n\n  # Optional aws_efs_csi_driver configuration\n  aws_efs_csi_driver = {\n    repository    = \"https://kubernetes-sigs.github.io/aws-efs-csi-driver/\"\n    chart_version = \"2.4.1\"\n    role_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n  }\n

    Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

    $ kubectl get deployment efs-csi-controller -n kube-system\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nefs-csi-controller   2/2     2            2           4m29s\n
    $ kubectl get daemonset efs-csi-node -n kube-system\n\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE\nefs-csi-node   3         3         3       3            3           beta.kubernetes.io/os=linux   4m32s\n
    "},{"location":"addons/aws-efs-csi-driver/#validate-efs-csi-driver","title":"Validate EFS CSI Driver","text":"

    Follow the static provisioning example described here to validate the CSI driver is working as expected.
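
    If you only need a quick in-cluster sanity check, the following sketch statically provisions a PersistentVolume against an existing EFS file system and binds a claim to it; the file system ID fs-0123456789abcdef0 and the efs-sc StorageClass name are placeholders you would replace with your own values.

    $ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: efs-sc\nprovisioner: efs.csi.aws.com\n---\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: efs-pv\nspec:\n  capacity:\n    storage: 5Gi\n  volumeMode: Filesystem\n  accessModes:\n    - ReadWriteMany\n  persistentVolumeReclaimPolicy: Retain\n  storageClassName: efs-sc\n  csi:\n    driver: efs.csi.aws.com\n    volumeHandle: fs-0123456789abcdef0 # placeholder EFS file system ID\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: efs-claim\nspec:\n  accessModes:\n    - ReadWriteMany\n  storageClassName: efs-sc\n  resources:\n    requests:\n      storage: 5Gi\nEOF\n

    Once applied, kubectl get pvc efs-claim should report the claim as Bound within a few seconds.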

    "},{"location":"addons/aws-for-fluentbit/","title":"AWS for Fluent Bit","text":"

    AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

    "},{"location":"addons/aws-for-fluentbit/#usage","title":"Usage","text":"

    AWS for Fluent Bit can be deployed by enabling the add-on via the following.

    enable_aws_for_fluentbit = true\n

    You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit_cw_log_group = {\n    create          = true\n    use_name_prefix = true # Set this to true to enable name prefix\n    name_prefix     = \"eks-cluster-logs-\"\n    retention       = 7\n  }\n  aws_for_fluentbit = {\n    name          = \"aws-for-fluent-bit\"\n    chart_version = \"0.1.28\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    If you want to enable Container Insights on Amazon EKS through Fluent Bit, you need to add the following parameter in your configuration:

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit = {\n    enable_containerinsights = true\n  }\n

    By default, Container Insights does not enable the kubelet monitoring feature with the AWS for Fluent Bit integration, since this optional feature is suggested only for large clusters. To enable the Container Insights Use_Kubelet feature, you'll need to provide a few more parameters:

      enable_aws_for_fluentbit = true\n  aws_for_fluentbit = {\n    enable_containerinsights = true\n    kubelet_monitoring       = true\n    set = [{\n        name  = \"cloudWatchLogs.autoCreateGroup\"\n        value = true\n      },\n      {\n        name  = \"hostNetwork\"\n        value = true\n      },\n      {\n        name  = \"dnsPolicy\"\n        value = \"ClusterFirstWithHostNet\"\n      }\n    ]\n  }\n
    "},{"location":"addons/aws-for-fluentbit/#verify-the-fluent-bit-setup","title":"Verify the Fluent Bit setup","text":"

    Verify aws-for-fluentbit pods are running.

    $ kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-for-fluent-bit\nNAME                       READY   STATUS    RESTARTS   AGE\naws-for-fluent-bit-6lhkj   1/1     Running   0          15m\naws-for-fluent-bit-sbn9b   1/1     Running   0          15m\naws-for-fluent-bit-svhwq   1/1     Running   0          15m\n

    Open the CloudWatch console

    In the navigation pane, choose Log groups.

    Make sure that you're in the Region where you deployed Fluent Bit.

    Check the list of log groups in the Region. You should see the following:

    /aws/eks/complete/aws-fluentbit-logs\n

    If you enabled Container Insights, you should also see the following Log Groups in your CloudWatch Console.

    /aws/containerinsights/Cluster_Name/application\n\n/aws/containerinsights/Cluster_Name/host\n\n/aws/containerinsights/Cluster_Name/dataplane\n

    Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.
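
    If you prefer the CLI, a quick way to perform the same check is to list the most recently active log streams in the log group shown above (the log group name will differ if you customized it):

    $ aws logs describe-log-streams --log-group-name /aws/eks/complete/aws-fluentbit-logs --order-by LastEventTime --descending --max-items 5 --query 'logStreams[].[logStreamName,lastEventTimestamp]'\n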

    There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.

    "},{"location":"addons/aws-fsx-csi-driver/","title":"AWS FSx CSI Driver","text":"

    This add-on deploys the Amazon FSx CSI Driver into an Amazon EKS Cluster.

    "},{"location":"addons/aws-fsx-csi-driver/#usage","title":"Usage","text":"

    The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

      enable_aws_fsx_csi_driver = true\n
    "},{"location":"addons/aws-fsx-csi-driver/#helm-chart-customization","title":"Helm Chart customization","text":"

    You can optionally customize the Helm chart deployment using a configuration like the following.

      enable_aws_fsx_csi_driver = true\n  aws_fsx_csi_driver = {\n    namespace     = \"aws-fsx-csi-driver\"\n    chart_version = \"1.6.0\"\n    role_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n  }\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/aws-fsx-csi-driver/#validation","title":"Validation","text":"

    Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

    $ kubectl -n kube-system get deployment fsx-csi-controller\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nfsx-csi-controller   2/2     2            2           4m29s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-controller\nNAME                                  READY   STATUS    RESTARTS   AGE\nfsx-csi-controller-56c6d9bbb8-89cpc   4/4     Running   0          3m30s\nfsx-csi-controller-56c6d9bbb8-9wnlh   4/4     Running   0          3m30s\n
    $ kubectl -n kube-system get daemonset fsx-csi-node\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE\nfsx-csi-node   3         3         3       3            3           kubernetes.io/os=linux   5m27s\n\n$ kubectl -n kube-system get pods -l  app=fsx-csi-node\nNAME                 READY   STATUS    RESTARTS   AGE\nfsx-csi-node-7c5z6   3/3     Running   0          5m29s\nfsx-csi-node-d5q28   3/3     Running   0          5m29s\nfsx-csi-node-hlg8q   3/3     Running   0          5m29s\n

    Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.

    $ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: fsx-sc\nprovisioner: fsx.csi.aws.com\nparameters:\n  subnetId: <YOUR_SUBNET_IDs>\n  securityGroupIds: <YOUR_SG_ID>\n  perUnitStorageThroughput: \"200\"\n  deploymentType: PERSISTENT_1\nmountOptions:\n  - flock\nEOF\n
    $ kubectl describe storageclass fsx-sc\nName:            fsx-sc\nIsDefaultClass:  No\nAnnotations:     kubectl.kubernetes.io/last-applied-configuration={\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{},\"name\":\"fsx-sc\"},\"mountOptions\":null,\"parameters\":{\"deploymentType\":\"PERSISTENT_1\",\"perUnitStorageThroughput\":\"200\",\"securityGroupIds\":\"sg-q1w2e3r4t5y6u7i8o\",\"subnetId\":\"subnet-q1w2e3r4t5y6u7i8o\"},\"provisioner\":\"fsx.csi.aws.com\"}\n\nProvisioner:           fsx.csi.aws.com\nParameters:            deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o\nAllowVolumeExpansion:  <unset>\nMountOptions:          <none>\nReclaimPolicy:         Delete\nVolumeBindingMode:     Immediate\nEvents:                <none>\n

    Create a PVC.

    $ cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: fsx-claim\nspec:\n  accessModes:\n    - ReadWriteMany\n  storageClassName: fsx-sc\n  resources:\n    requests:\n      storage: 1200Gi\nEOF\n

    Wait for the PV to be created and bound to your PVC.

    $ kubectl get pvc\nNAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nfsx-claim   Bound    pvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            fsx-sc         7m47s\n$ kubectl get pv\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE\npvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            Delete           Bound    default/fsx-claim   fsx-sc                  2m13s\n
    "},{"location":"addons/aws-gateway-api-controller/","title":"AWS Gateway API Controller","text":"

    AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, in containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.

    "},{"location":"addons/aws-gateway-api-controller/#usage","title":"Usage","text":"

    AWS Gateway API Controller can be deployed by enabling the add-on via the following.

      enable_aws_gateway_api_controller = true\n  aws_gateway_api_controller = {\n    repository_username = data.aws_ecrpublic_authorization_token.token.user_name\n    repository_password = data.aws_ecrpublic_authorization_token.token.password\n    set = [{\n      name  = \"clusterVpcId\"\n      value = \"vpc-12345abcd\"\n    }]\n}\n

    You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.

      enable_aws_gateway_api_controller = true\n  aws_gateway_api_controller = {\n    name                = \"aws-gateway-api-controller\"\n    chart_version       = \"v0.0.12\"\n    repository          = \"oci://public.ecr.aws/aws-application-networking-k8s\"\n    repository_username = data.aws_ecrpublic_authorization_token.token.user_name\n    repository_password = data.aws_ecrpublic_authorization_token.token.password\n    namespace           = \"aws-application-networking-system\"\n    values              = [templatefile(\"${path.module}/values.yaml\", {})]\n    set = [{\n      name  = \"clusterVpcId\"\n      value = \"vpc-12345abcd\"\n    }]\n  }\n

    Verify aws-gateway-api-controller pods are running.

    $ kubectl get pods -n aws-application-networking-system\nNAME                                                               READY   STATUS    RESTARTS   AGE\naws-gateway-api-controller-aws-gateway-controller-chart-8f42q426   1/1     Running   0          40s\naws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g   1/1     Running   0          71s\n

    Deploy example GatewayClass

    $ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml\ngatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created\n

    Describe GatewayClass

    $ kubectl describe gatewayclass\nName:         amazon-vpc-lattice\nNamespace:\nLabels:       <none>\nAnnotations:  <none>\nAPI Version:  gateway.networking.k8s.io/v1beta1\nKind:         GatewayClass\nMetadata:\n  Creation Timestamp:  2023-06-22T22:33:32Z\n  Generation:          1\n  Resource Version:    819021\n  UID:                 aac59195-8f37-4c23-a2a5-b0f363deda77\nSpec:\n  Controller Name:  application-networking.k8s.aws/gateway-api-controller\nStatus:\n  Conditions:\n    Last Transition Time:  2023-06-22T22:33:32Z\n    Message:               Accepted\n    Observed Generation:   1\n    Reason:                Accepted\n    Status:                True\n    Type:                  Accepted\nEvents:                    <none>\n
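
    With the GatewayClass accepted, you can create a Gateway that references it. The manifest below is a minimal sketch; the Gateway name and HTTP listener are illustrative only.

    $ cat <<EOF | kubectl apply -f -\napiVersion: gateway.networking.k8s.io/v1beta1\nkind: Gateway\nmetadata:\n  name: example-gateway\n  namespace: default\nspec:\n  gatewayClassName: amazon-vpc-lattice\n  listeners:\n    - name: http\n      protocol: HTTP\n      port: 80\nEOF\n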
    "},{"location":"addons/aws-load-balancer-controller/","title":"AWS Load Balancer Controller","text":"

    AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

    "},{"location":"addons/aws-load-balancer-controller/#usage","title":"Usage","text":"

    To deploy the AWS Load Balancer Controller Add-on via EKS Blueprints Addons, reference the following parameters under module.eks_blueprints_addons.

    NOTE: In versions 2.5 and newer, the AWS Load Balancer Controller becomes the default controller for Kubernetes Service resources with type: LoadBalancer and provisions an AWS Network Load Balancer (NLB) for each such Service. It does this through a mutating webhook for Services that sets the spec.loadBalancerClass field to service.k8s.aws/nlb on new Services of type: LoadBalancer. You can turn off this feature and revert to the legacy Cloud Provider as the default controller by setting the Helm chart value enableServiceMutatorWebhook to false. With the feature enabled, the cluster won't provision new Classic Load Balancers for your Services; existing Classic Load Balancers will continue to work.

    module \"eks_blueprints_addons\" {\n\n  enable_aws_load_balancer_controller = true\n  aws_load_balancer_controller = {\n    set = [\n      {\n        name  = \"vpcId\"\n        value = module.vpc.vpc_id\n      },\n      {\n        name  = \"podDisruptionBudget.maxUnavailable\"\n        value = 1\n      },\n      {\n        name  = \"enableServiceMutatorWebhook\"\n        value = \"false\"\n      }\n    ]\n  }\n
    "},{"location":"addons/aws-load-balancer-controller/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller configuration block:

      aws_load_balancer_controller = {\n    set = [\n      {\n        name  = \"vpcId\"\n        value = module.vpc.vpc_id\n      },\n      {\n        name  = \"podDisruptionBudget.maxUnavailable\"\n        value = 1\n      },\n      {\n        name  = \"resources.requests.cpu\"\n        value = \"100m\"\n      },\n      {\n        name  = \"resources.requests.memory\"\n        value = \"128Mi\"\n      },\n    ]\n  }\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/aws-load-balancer-controller/#validate","title":"Validate","text":"
    1. To validate the deployment, check if the aws-load-balancer-controller Pods were created in the kube-system Namespace, as in the following example.
    kubectl -n kube-system get pods | grep aws-load-balancer-controller\nNAMESPACE       NAME                                            READY   STATUS    RESTARTS   AGE\nkube-system     aws-load-balancer-controller-6cbdb58654-fvskt   1/1     Running   0          26m\nkube-system     aws-load-balancer-controller-6cbdb58654-sc7dk   1/1     Running   0          26m\n
    1. Create a Kubernetes Ingress, using the alb IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc.
    kubectl create ingress example-ingress --class alb --rule=\"/*=example-svc:80\" \\\n--annotation alb.ingress.kubernetes.io/scheme=internet-facing \\\n--annotation alb.ingress.kubernetes.io/target-type=ip\n
    kubectl get ingress\nNAME                CLASS   HOSTS   ADDRESS                                                                 PORTS   AGE\nexample-ingress     alb     *       k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com   80      4m9s\n
    "},{"location":"addons/aws-load-balancer-controller/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart AWS Docs

    "},{"location":"addons/aws-node-termination-handler/","title":"AWS Node Termination Handler","text":"

    This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.

    "},{"location":"addons/aws-node-termination-handler/#usage","title":"Usage","text":"

    AWS Node Termination Handler can be deployed by enabling the add-on via the following.

    enable_aws_node_termination_handler = true\n

    You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.

      enable_aws_node_termination_handler = true\n\n  aws_node_termination_handler = {\n    name          = \"aws-node-termination-handler\"\n    chart_version = \"0.21.0\"\n    repository    = \"https://aws.github.io/eks-charts\"\n    namespace     = \"aws-node-termination-handler\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify aws-node-termination-handler pods are running.

    $ kubectl get pods -n aws-node-termination-handler\nNAME                                            READY   STATUS    RESTARTS      AGE\naws-node-termination-handler-6f598b6b89-6mqgk   1/1     Running   1 (22h ago)   26h\n

    Verify SQS Queue is created.

    $ aws sqs list-queues\n\n{\n    \"QueueUrls\": [\n        \"https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004\"\n    ]\n}\n

    Verify Event Rules are created.

    $ aws event list-rules\n{\n    [\n        {\n            \"Name\": \"NTH-ASGTerminiate-20230602191740664900000025\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"Auto scaling instance terminate event\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-HealthEvent-20230602191740079300000022\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"AWS health event\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-InstanceRebalance-20230602191740077100000021\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance rebalance recommendation\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-InstanceStateChange-20230602191740165000000024\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance state-change notification\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTH-SpotInterrupt-20230602191740077100000020\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 spot instance interruption warning\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHASGTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHInstanceStateChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHRebalanceRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance 
Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHScheduledChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        },\n        {\n            \"Name\": \"NTHSpotTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n        }\n    ]\n}\n
    "},{"location":"addons/aws-private-ca-issuer/","title":"AWS Private CA Issuer","text":"

    AWS Private CA is an AWS service that can set up and manage private certificate authorities (CAs), as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager that signs certificate requests using AWS Private CA in an Amazon EKS Cluster.

    "},{"location":"addons/aws-private-ca-issuer/#usage","title":"Usage","text":""},{"location":"addons/aws-private-ca-issuer/#pre-requisites","title":"Pre-requisites","text":"

    To deploy the AWS Private CA Issuer, you need to install cert-manager first; refer to this documentation to do so through EKS Blueprints Addons.

    "},{"location":"addons/aws-private-ca-issuer/#deployment","title":"Deployment","text":"

    With cert-manager in place, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons by referencing the following parameters under module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_cert_manager         = true\n  enable_aws_privateca_issuer = true\n  aws_privateca_issuer = {\n    acmca_arn        = aws_acmpca_certificate_authority.this.arn\n  }\n}\n
    "},{"location":"addons/aws-private-ca-issuer/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the aws_privateca_issuer configuration block:

      aws_privateca_issuer = {\n    acmca_arn        = aws_acmpca_certificate_authority.this.arn\n    namespace        = \"aws-privateca-issuer\"\n    create_namespace = true\n  }\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/aws-private-ca-issuer/#validation","title":"Validation","text":"
    1. List all the pods running in the aws-privateca-issuer and cert-manager Namespaces.
    kubectl get pods -n aws-privateca-issuer\nkubectl get pods -n cert-manager\n
    1. Check the certificate status; it should be in Ready state and point to a secret created in the same Namespace.
    kubectl get certificate -o wide\nNAME      READY   SECRET                  ISSUER                    STATUS                                          AGE\nexample   True    example-clusterissuer   tls-with-aws-pca-issuer   Certificate is up to date and has not expired   41m\n\nkubectl get secret example-clusterissuer\nNAME                    TYPE                DATA   AGE\nexample-clusterissuer   kubernetes.io/tls   3      43m\n
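
    For reference, the certificate shown above can be produced with manifests similar to the following sketch. The AWSPCAClusterIssuer kind is provided by the aws-privateca-issuer controller; the CA ARN, region, and common name are placeholders.

    $ cat <<EOF | kubectl apply -f -\napiVersion: awspca.cert-manager.io/v1beta1\nkind: AWSPCAClusterIssuer\nmetadata:\n  name: tls-with-aws-pca-issuer\nspec:\n  arn: <YOUR_PRIVATE_CA_ARN> # placeholder\n  region: <YOUR_REGION>      # placeholder\n---\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: example\n  namespace: default\nspec:\n  commonName: example.internal # placeholder\n  secretName: example-clusterissuer\n  duration: 2160h\n  renewBefore: 360h\n  privateKey:\n    algorithm: RSA\n    size: 2048\n  issuerRef:\n    group: awspca.cert-manager.io\n    kind: AWSPCAClusterIssuer\n    name: tls-with-aws-pca-issuer\nEOF\n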
    "},{"location":"addons/aws-private-ca-issuer/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart AWS Docs

    "},{"location":"addons/bottlerocket/","title":"Bottlerocket and Bottlerocket Update Operator","text":"

    Bottlerocket is a Linux-based open-source operating system that focuses on security and maintainability, providing a reliable, consistent, and safe platform for container-based workloads.

    The Bottlerocket Update Operator (BRUPOP) is a Kubernetes operator that coordinates Bottlerocket updates on hosts in a cluster. It relies on a controller Deployment on one node that orchestrates updates across the cluster, an agent DaemonSet on every Bottlerocket node that periodically queries for and performs updates (rolled out in waves to reduce the impact of issues), and an API server that performs additional authorization.

    Cert-manager is required for the API server to use a CA certificate when communicating over SSL with the agents.

    • Helm charts
    "},{"location":"addons/bottlerocket/#requirements","title":"Requirements","text":"

    BRUPOP performs updates only on Nodes running Bottlerocket OS. Here are some code snippets showing how to set up Bottlerocket OS Nodes using Managed Node Groups with the Terraform Amazon EKS module and Karpenter Node Classes.

    Notice the label bottlerocket.aws/updater-interface-version=2.0.0 set in the [settings.kubernetes.node-labels] section. This label is required for the BRUPOP Agent to query and perform updates. Nodes not labeled will not be checked by the agent.

    "},{"location":"addons/bottlerocket/#managed-node-groups","title":"Managed Node Groups","text":"
    module \"eks\" {\n  source  = \"terraform-aws-modules/eks/aws\"\n  version = \"~> 19.21\"\n...\n  eks_managed_node_groups = {\n    bottlerocket = {\n      platform = \"bottlerocket\"\n      ami_type       = \"BOTTLEROCKET_x86_64\"\n      instance_types = [\"m5.large\", \"m5a.large\"]\n\n      iam_role_attach_cni_policy = true\n\n      min_size     = 1\n      max_size     = 5\n      desired_size = 3\n\n      enable_bootstrap_user_data = true\n      bootstrap_extra_args = <<-EOT\n            [settings.host-containers.admin]\n            enabled = false\n            [settings.host-containers.control]\n            enabled = true\n            [settings.kernel]\n            lockdown = \"integrity\"\n            [settings.kubernetes.node-labels]\n            \"bottlerocket.aws/updater-interface-version\" = \"2.0.0\"\n            [settings.kubernetes.node-taints]\n            \"CriticalAddonsOnly\" = \"true:NoSchedule\"\n          EOT\n    }\n  }\n}\n
    "},{"location":"addons/bottlerocket/#karpenter","title":"Karpenter","text":"
    apiVersion: karpenter.k8s.aws/v1beta1\nkind: EC2NodeClass\nmetadata:\n  name: bottlerocket-example\nspec:\n...\n  amiFamily: Bottlerocket\n  userData:  |\n    [settings.kubernetes]\n    \"kube-api-qps\" = 30\n    \"shutdown-grace-period\" = \"30s\"\n    \"shutdown-grace-period-for-critical-pods\" = \"30s\"\n    [settings.kubernetes.eviction-hard]\n    \"memory.available\" = \"20%\"\n    [settings.kubernetes.node-labels]\n     \"bottlerocket.aws/updater-interface-version\" = \"2.0.0\"\n
    "},{"location":"addons/bottlerocket/#usage","title":"Usage","text":"

    BRUPOP can be deployed with the default configuration by enabling the add-on via the following. Notice the parameter wait = true set for Cert-Manager; this is needed because BRUPOP requires the Cert-Manager CRDs to already be present in the cluster before it is deployed.

    module \"eks_blueprints_addons\" {\n  source  = \"aws-ia/eks-blueprints-addons/aws\"\n  version = \"~> 1.13\"\n\n  cluster_name      = module.eks.cluster_name\n  cluster_endpoint  = module.eks.cluster_endpoint\n  cluster_version   = module.eks.cluster_version\n  oidc_provider_arn = module.eks.oidc_provider_arn\n\n  enable_cert_manager = true\n  cert_manager = {\n    wait = true\n  }\n  enable_bottlerocket_update_operator = true\n}\n

    You can also customize the Helm charts that deploy the bottlerocket_update_operator and the bottlerocket_shadow via the following configuration:

    enable_bottlerocket_update_operator           = true\n\nbottlerocket_update_operator = {\n  name          = \"brupop-operator\"\n  description   = \"A Helm chart for BRUPOP\"\n  chart_version = \"1.3.0\"\n  namespace     = \"brupop\"\n  set           = [{\n    name  = \"scheduler_cron_expression\"\n    value = \"0 * * * * * *\" # Default Unix Cron syntax, set to check every hour. Example \"0 0 23 * * Sat *\" Perform update checks every Saturday at 23H / 11PM\n    }]\n}\n\nbottlerocket_shadow = {\n  name          = \"brupop-crds\"\n  description   = \"A Helm chart for BRUPOP CRDs\"\n  chart_version = \"1.0.0\"\n}\n

    To see a complete working example, see the bottlerocket Blueprints Pattern.

    "},{"location":"addons/bottlerocket/#validate","title":"Validate","text":"
    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing the BRUPOP resources provisioned:
    $ kubectl -n brupop-bottlerocket-aws get all\n\nNAME                                                READY   STATUS    RESTARTS      AGE\npod/brupop-agent-5nv6m                              1/1     Running   1 (33h ago)   33h\npod/brupop-agent-h4vw9                              1/1     Running   1 (33h ago)   33h\npod/brupop-agent-sr9ms                              1/1     Running   2 (33h ago)   33h\npod/brupop-apiserver-6ccb74f599-4c9lv               1/1     Running   0             33h\npod/brupop-apiserver-6ccb74f599-h6hg8               1/1     Running   0             33h\npod/brupop-apiserver-6ccb74f599-svw8n               1/1     Running   0             33h\npod/brupop-controller-deployment-58d46595cc-7vxnt   1/1     Running   0             33h\n\nNAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE\nservice/brupop-apiserver           ClusterIP   172.20.153.72   <none>        443/TCP   33h\nservice/brupop-controller-server   ClusterIP   172.20.7.127    <none>        80/TCP    33h\n\nNAME                          DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE\ndaemonset.apps/brupop-agent   3         3         3       3            3           <none>          33h\n\nNAME                                           READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/brupop-apiserver               3/3     3            3           33h\ndeployment.apps/brupop-controller-deployment   1/1     1            1           33h\n\nNAME                                                      DESIRED   CURRENT   READY   AGE\nreplicaset.apps/brupop-apiserver-6ccb74f599               3         3         3       33h\nreplicaset.apps/brupop-controller-deployment-58d46595cc   1         1         1       33h\n\n$ kubectl describe apiservices.apiregistration.k8s.io v2.brupop.bottlerocket.aws\nName:         v2.brupop.bottlerocket.aws\nNamespace:\nLabels:       kube-aggregator.kubernetes.io/automanaged=true\nAnnotations:  <none>\nAPI Version:  apiregistration.k8s.io/v1\nKind:         APIService\nMetadata:\n  Creation Timestamp:  2024-01-30T16:27:15Z\n  Resource Version:    8798\n  UID:                 034abe22-7e5f-4040-9b64-8ca9d55a4af6\nSpec:\n  Group:                   brupop.bottlerocket.aws\n  Group Priority Minimum:  1000\n  Version:                 v2\n  Version Priority:        100\nStatus:\n  Conditions:\n    Last Transition Time:  2024-01-30T16:27:15Z\n    Message:               Local APIServices are always available\n    Reason:                Local\n    Status:                True\n    Type:                  Available\nEvents:                    <none>\n
    1. If not set during the deployment, add the required label bottlerocket.aws/updater-interface-version=2.0.0 as shown below to all the Nodes that you want to have updates handled by BRUPOP.
    $ kubectl label node ip-10-0-34-87.us-west-2.compute.internal bottlerocket.aws/updater-interface-version=2.0.0\nnode/ip-10-0-34-87.us-west-2.compute.internal labeled\n\n$ kubectl get nodes -L bottlerocket.aws/updater-interface-version\nNAME                                        STATUS                     ROLES    AGE   VERSION               UPDATER-INTERFACE-VERSION\nip-10-0-34-87.us-west-2.compute.internal    Ready                      <none>   34h   v1.28.1-eks-d91a302   2.0.0\n
    1. Because the default cron schedule for BRUPOP is set to check for updates every minute, you'll be able to see within a few minutes that the Node had its version updated automatically with no downtime.
    kubectl get nodes\nNAME                                        STATUS                     ROLES    AGE   VERSION\nip-10-0-34-87.us-west-2.compute.internal    Ready                      <none>   34h   v1.28.4-eks-d91a302\n
    "},{"location":"addons/cert-manager/","title":"Cert-Manager","text":"

    Cert-manager is an X.509 certificate controller for Kubernetes-like workloads. It obtains certificates from a variety of Issuers, both popular public Issuers and private Issuers, ensures the certificates are valid and up to date, and attempts to renew them at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.

    "},{"location":"addons/cert-manager/#usage","title":"Usage","text":"

    To deploy the cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_cert_manager         = true\n}\n
    "},{"location":"addons/cert-manager/#helm-chart-customization","title":"Helm Chart customization","text":"

    It's possible to customize your deployment using the Helm Chart parameters inside the cert_manager configuration block:

      cert_manager = {\n    chart_version    = \"v1.11.1\"\n    namespace        = \"cert-manager\"\n    create_namespace = true\n  }\n

    You can find all available Helm Chart parameter values here.

    "},{"location":"addons/cert-manager/#validation","title":"Validation","text":"
    1. Validate that the Cert-Manager Pods are running.
    kubectl -n cert-manager get pods\nNAME                                      READY   STATUS    RESTARTS   AGE\ncert-manager-5989bcc87-96qvf              1/1     Running   0          2m49s\ncert-manager-cainjector-9b44ddb68-8c7b9   1/1     Running   0          2m49s\ncert-manager-webhook-776b65456-k6br4      1/1     Running   0          2m49s\n
    1. Create a SelfSigned ClusterIssuer resource in the cluster.
    apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: selfsigned-cluster-issuer\nspec:\n  selfSigned: {}\n
    kubectl get clusterissuers -o wide selfsigned-cluster-issuer\nNAME                        READY   STATUS   AGE\nselfsigned-cluster-issuer   True             3m\n
    1. Create a Certificate in a given Namespace.
    apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: example\n  namespace: default\nspec:\n  isCA: true\n  commonName: example\n  secretName: example-secret\n  privateKey:\n    algorithm: ECDSA\n    size: 256\n  issuerRef:\n    name: selfsigned-cluster-issuer\n    kind: ClusterIssuer\n    group: cert-manager.io\n
    1. Check the certificate status; it should be in Ready state and point to a secret created in the same Namespace.
    kubectl get certificate -o wide\nNAME      READY   SECRET           ISSUER                      STATUS                                          AGE\nexample   True    example-secret   selfsigned-cluster-issuer   Certificate is up to date and has not expired   44s\n\nkubectl get secret example-secret\nNAME             TYPE                DATA   AGE\nexample-secret   kubernetes.io/tls   3      70s\n
    "},{"location":"addons/cert-manager/#resources","title":"Resources","text":"

    GitHub Repo Helm Chart

    "},{"location":"addons/cluster-autoscaler/","title":"Cluster Autoscaler","text":"

    The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.

    "},{"location":"addons/cluster-autoscaler/#usage","title":"Usage","text":"

    Cluster Autoscaler can be deployed by enabling the add-on via the following.

    enable_cluster_autoscaler = true\n

    You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.

      enable_cluster_autoscaler = true\n\n  cluster_autoscaler = {\n    name          = \"cluster-autoscaler\"\n    chart_version = \"9.29.0\"\n    repository    = \"https://kubernetes.github.io/autoscaler\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify cluster-autoscaler pods are running.

    $ kubectl get pods -n kube-system\nNAME                                                         READY   STATUS    RESTARTS     AGE\ncluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9   1/1     Running   1 (2d ago)   2d5h\n
    "},{"location":"addons/cluster-proportional-autoscaler/","title":"Cluster Proportional Autoscaler","text":"

    Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.

    The cluster-proportional-autoscaler helps scale applications that run as a Deployment, ReplicationController, or ReplicaSet. It is an alternative to Horizontal Pod Autoscaling and is typically installed as a Deployment in your cluster.

    Refer to the eks-best-practices-guides for additional configuration guidance.

    "},{"location":"addons/cluster-proportional-autoscaler/#usage","title":"Usage","text":"

    This add-on requires both enable_cluster_proportional_autoscaler and cluster_proportional_autoscaler as mandatory fields.

    The example below shows how to enable cluster-proportional-autoscaler for the CoreDNS Deployment. The CoreDNS Deployment is not configured with an HPA, so this add-on helps scale the CoreDNS Add-on according to the number of nodes and cores in the cluster.

    This Add-on can be used to scale any application with Deployment objects.

    enable_cluster_proportional_autoscaler  = true\ncluster_proportional_autoscaler  = {\n    values = [\n      <<-EOT\n        nameOverride: kube-dns-autoscaler\n\n        # Formula for controlling the replicas. Adjust according to your needs\n        # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )\n        config:\n          linear:\n            coresPerReplica: 256\n            nodesPerReplica: 16\n            min: 1\n            max: 100\n            preventSinglePointFailure: true\n            includeUnschedulableNodes: true\n\n        # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).\n        options:\n          target: deployment/coredns # Notice the target as `deployment/coredns`\n\n        serviceAccount:\n          create: true\n          name: kube-dns-autoscaler\n\n        podSecurityContext:\n          seccompProfile:\n            type: RuntimeDefault\n            supplementalGroups: [65534]\n            fsGroup: 65534\n\n        resources:\n          limits:\n            cpu: 100m\n            memory: 128Mi\n          requests:\n            cpu: 100m\n            memory: 128Mi\n\n        tolerations:\n          - key: \"CriticalAddonsOnly\"\n            operator: \"Exists\"\n            description: \"Cluster Proportional Autoscaler for CoreDNS Service\"\n      EOT\n    ]\n}\n
    "},{"location":"addons/cluster-proportional-autoscaler/#expected-result","title":"Expected result","text":"

    The cluster-proportional-autoscaler pod should be running in the kube-system namespace.

    kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler\nNAME                                                              READY   STATUS    RESTARTS   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7   1/1     Running   0          21h\n
    The cluster-proportional-autoscaler-kube-dns-autoscaler ConfigMap should also exist.
    kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler\nNAME                                                  DATA   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler   1      21h\n

    "},{"location":"addons/cluster-proportional-autoscaler/#testing","title":"Testing","text":"

    To test that coredns pods scale, first take a baseline of how many nodes the cluster has and how many coredns pods are running.

    kubectl get nodes\nNAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

    Change the following parameters in the HCL code above so a scaling event can be easily triggered:

            config:\n          linear:\n            coresPerReplica: 4\n            nodesPerReplica: 2\n            min: 1\n            max: 4\n
    and execute terraform apply.

    Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.
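
    Alternatively, the same change can be made from the CLI; the cluster and node group names below are placeholders.

    aws eks update-nodegroup-config --cluster-name <CLUSTER_NAME> --nodegroup-name <NODEGROUP_NAME> --scaling-config minSize=1,maxSize=5,desiredSize=5\n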

    Check that the new node came up and coredns scaled up.

    NAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-14-120.ec2.internal   Ready    <none>   10m   v1.26.4-eks-0a21954\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-ww64t   1/1     Running   0          10m\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

    "},{"location":"addons/external-dns/","title":"External DNS","text":"

    ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly\u2014e.g. AWS Route 53.

    "},{"location":"addons/external-dns/#usage","title":"Usage","text":"

    External DNS can be deployed by enabling the add-on via the following.

    enable_external_dns = true\n

    You can optionally customize the Helm chart that deploys External DNS via the following configuration.

      enable_external_dns = true\n\n  external_dns = {\n    name          = \"external-dns\"\n    chart_version = \"1.12.2\"\n    repository    = \"https://kubernetes-sigs.github.io/external-dns/\"\n    namespace     = \"external-dns\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n  external_dns_route53_zone_arns = [\"XXXXXXXXXXXXXXXXXXXXXXX\"]\n

    Verify external-dns pods are running.

    $ kubectl get pods -n external-dns\nNAME                            READY   STATUS    RESTARTS     AGE\nexternal-dns-849b89c675-ffnf6   1/1     Running   1 (2d ago)   2d5h\n

    To further configure external-dns, refer to the examples:

    • AWS Load Balancer Controller
    • Route53
    • Same domain for public and private Route53 zones
    • Cloud Map
    • Kube Ingress AWS Controller
    "},{"location":"addons/external-secrets/","title":"External Secrets","text":"

    External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.

    "},{"location":"addons/external-secrets/#usage","title":"Usage","text":"

    External Secrets can be deployed by enabling the add-on via the following.

    enable_external_secrets = true\n

    You can optionally customize the Helm chart that deploys External Secrets via the following configuration.

      enable_external_secrets = true\n\n  external_secrets = {\n    name          = \"external-secrets\"\n    chart_version = \"0.9.13\"\n    repository    = \"https://charts.external-secrets.io\"\n    namespace     = \"external-secrets\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify external-secrets pods are running.

    $ kubectl get pods -n external-secrets\nNAME                                               READY   STATUS    RESTARTS       AGE\nexternal-secrets-67bfd5b47c-xc5xf                  1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-cert-controller-8f75c6f79-qcfx4   1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-webhook-78f6bd456-76wmm           1/1     Running   1 (2d1h ago)   2d6h\n
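
    Once the operator is running, secrets can be synced by creating a SecretStore and an ExternalSecret. The sketch below assumes a secret named my-app/credentials exists in AWS Secrets Manager in the given region and that the operator's service account has permission to read it; all names are placeholders.

    $ cat <<EOF | kubectl apply -f -\napiVersion: external-secrets.io/v1beta1\nkind: SecretStore\nmetadata:\n  name: aws-secretsmanager\n  namespace: default\nspec:\n  provider:\n    aws:\n      service: SecretsManager\n      region: us-west-2 # placeholder region\n---\napiVersion: external-secrets.io/v1beta1\nkind: ExternalSecret\nmetadata:\n  name: example-externalsecret\n  namespace: default\nspec:\n  refreshInterval: 1h\n  secretStoreRef:\n    name: aws-secretsmanager\n    kind: SecretStore\n  target:\n    name: example-k8s-secret # Kubernetes Secret created by the operator\n  data:\n    - secretKey: password\n      remoteRef:\n        key: my-app/credentials # placeholder Secrets Manager secret name\n        property: password\nEOF\n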
    "},{"location":"addons/external-secrets/#eks-fargate","title":"EKS Fargate","text":"

    By default, external-secrets creates a webhook pod that listens on port 10250 [Reference]:

    yes, by default we use port 10250 for the webhook pod because it's generally allowed throughout most default firewall implementations (GKE, EKS), but it conflicts with Fargate. Any port number should do the trick, as long as there is no sg rules or NACLs blocking it :).

    This module adds a value, enable_eks_fargate, which changes the webhook port from 10250 to 9443. This matches the prior default value for external-secrets and is typically an acceptable port within most clusters' firewalls today.
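
    A minimal sketch of that configuration might look like the following; the exact placement of enable_eks_fargate (shown here at the module level) is an assumption based on the description above.

      enable_external_secrets = true\n  enable_eks_fargate      = true # assumption: switches the external-secrets webhook from port 10250 to 9443 as described above\n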

    "},{"location":"addons/fargate-fluentbit/","title":"Fargate FluentBit","text":"

    Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar; Amazon runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated ConfigMap that is deployed via this Add-on.

    "},{"location":"addons/fargate-fluentbit/#usage","title":"Usage","text":"

    To configure the Fargate Fluentbit ConfigMap via EKS Blueprints Addons, reference the following parameters under module.eks_blueprints_addons.

    module \"eks_blueprints_addons\" {\n\n  enable_fargate_fluentbit = true\n  fargate_fluentbit = {\n    flb_log_cw = true\n  }\n}\n

    It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group configuration block:

      fargate_fluentbit_cw_log_group = {\n\n  name              = \"existing-log-group\"\n  name_prefix       = \"dev-environment-logs\"\n  retention_in_days = 7\n  kms_key_id        = \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\n  skip_destroy      = true\n  }\n
    "},{"location":"addons/fargate-fluentbit/#validation","title":"Validation","text":"
    1. Check if the aws-logging configMap for Fargate Fluentbit was created.
    kubectl -n aws-observability get configmap aws-logging -o yaml\napiVersion: v1\ndata:\n  filters.conf: |\n    [FILTER]\n      Name parser\n      Match *\n      Key_Name log\n      Parser regex\n      Preserve_Key True\n      Reserve_Data True\n  flb_log_cw: \"true\"\n  output.conf: |\n    [OUTPUT]\n      Name cloudwatch_logs\n      Match *\n      region us-west-2\n      log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\n      log_stream_prefix fargate-logs-\n      auto_create_group true\n  parsers.conf: |\n    [PARSER]\n      Name regex\n      Format regex\n      Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$\n      Time_Key time\n      Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n      Time_Keep On\n      Decode_Field_As json message\nimmutable: false\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-05-08T21:14:52Z\"\n  name: aws-logging\n  namespace: aws-observability\n  resourceVersion: \"1795\"\n  uid: d822bcf5-a441-4996-857e-7fb1357bc07e\n
    1. Validate if the CloudWatch LogGroup was created accordingly, and LogStreams were populated.
    aws logs describe-log-groups --log-group-name-prefix \"/fargate-serverless/fargate-fluentbit\"\n{\n    \"logGroups\": [\n        {\n            \"logGroupName\": \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\",\n            \"creationTime\": 1683580491652,\n            \"retentionInDays\": 90,\n            \"metricFilterCount\": 0,\n            \"arn\": \"arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*\",\n            \"storedBytes\": 0\n        }\n    ]\n}\n
    aws logs describe-log-streams --log-group-name \"/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006\" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'\n[\n    \"fargate-logs-flblogs.var.log.fluent-bit.log\",\n    \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log\",\n    \"fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log\",\n    \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log\",\n    \"fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log\"\n]\n
    "},{"location":"addons/fargate-fluentbit/#resources","title":"Resources","text":"

    AWS Docs Fluent Bit for Amazon EKS on AWS Fargate Blog Post

    "},{"location":"addons/ingress-nginx/","title":"Ingress Nginx","text":"

    This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.

    Beyond handling Kubernetes Ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL path (path-based routing); see the example at the end of this section.

    "},{"location":"addons/ingress-nginx/#usage","title":"Usage","text":"

    Ingress Nginx Controller can be deployed by enabling the add-on via the following.

    enable_ingress_nginx = true\n

    You can optionally customize the Helm chart that deploys ingress-nginx via the following configuration.

      enable_ingress_nginx = true\n\n  ingress_nginx = {\n    name          = \"ingress-nginx\"\n    chart_version = \"4.6.1\"\n    repository    = \"https://kubernetes.github.io/ingress-nginx\"\n    namespace     = \"ingress-nginx\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify ingress-nginx pods are running.

    $ kubectl get pods -n ingress-nginx\nNAME                                       READY   STATUS    RESTARTS   AGE\ningress-nginx-controller-f6c55fdc8-8bt2z   1/1     Running   0          44m\n
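
    To exercise the host-based routing described above, you can create an Ingress that uses the nginx IngressClass and points at an existing Service; example-svc and app.example.com are placeholders.

    kubectl create ingress example-nginx-ingress --class nginx --rule=\"app.example.com/*=example-svc:80\"\n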
    "},{"location":"addons/karpenter/","title":"Karpenter","text":""},{"location":"addons/karpenter/#prerequisites","title":"Prerequisites","text":"

    If deploying a node template that uses Spot instances, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:

    aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true\n
    "},{"location":"addons/karpenter/#validate","title":"Validate","text":"

    The following command will update the kubeconfig on your local machine and allow you to interact with your EKS Cluster using kubectl to validate the Karpenter deployment.

    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing all the pods running currently
    kubectl get pods -n karpenter\n\n# Output should look similar to below\nNAME                         READY   STATUS    RESTARTS   AGE\nkarpenter-6f97df4f77-5nqsk   1/1     Running   0          3m28s\nkarpenter-6f97df4f77-n7fkf   1/1     Running   0          3m28s\n
    1. View the current nodes - this example utilizes EKS Fargate for hosting the Karpenter controller so only Fargate nodes are present currently:
    kubectl get nodes\n\n# Output should look similar to below\nNAME                                                STATUS   ROLES    AGE     VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   2m56s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   2m57s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   2m34s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   2m33s   v1.26.3-eks-f4dc2c0\n
    1. Create a sample pause deployment to demonstrate scaling:
    kubectl apply -f - <<EOF\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: inflate\nspec:\n  replicas: 0\n  selector:\n    matchLabels:\n      app: inflate\n  template:\n    metadata:\n      labels:\n        app: inflate\n    spec:\n      terminationGracePeriodSeconds: 0\n      containers:\n        - name: inflate\n          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7\n          resources:\n            requests:\n              cpu: 1\nEOF\n
    1. Scale up the sample pause deployment to see Karpenter respond by provisioning nodes to support the workload:
    kubectl scale deployment inflate --replicas 5\n# To view logs\n# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller\n
    1. Re-check the nodes; you will now see a new EC2 node provisioned to support the scaled workload:
    kubectl get nodes\n\n# Output should look similar to below\nNAME                                                STATUS   ROLES    AGE     VERSION\nfargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   5m15s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   5m16s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   4m53s   v1.26.3-eks-f4dc2c0\nfargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   4m52s   v1.26.3-eks-f4dc2c0\nip-10-0-1-184.us-west-2.compute.internal            Ready    <none>   26s     v1.26.2-eks-a59e1f0 # <= new EC2 node launched\n
    1. Remove the sample pause deployment:
    kubectl delete deployment inflate\n
    "},{"location":"addons/kube-prometheus-stack/","title":"Kube Prometheus Stack","text":"

    Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy-to-operate, end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.

    "},{"location":"addons/kube-prometheus-stack/#usage","title":"Usage","text":"

    Kube Prometheus Stack can be deployed by enabling the add-on via the following.

    enable_kube_prometheus_stack = true\n

    You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.

      enable_kube_prometheus_stack = true\n\n  kube_prometheus_stack = {\n    name          = \"kube-prometheus-stack\"\n    chart_version = \"51.2.0\"\n    repository    = \"https://prometheus-community.github.io/helm-charts\"\n    namespace     = \"kube-prometheus-stack\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify kube-prometheus-stack pods are running.

    $ kubectl get pods -n kube-prometheus-stack\nNAME                                                        READY   STATUS    RESTARTS       AGE\nalertmanager-kube-prometheus-stack-alertmanager-0           2/2     Running   3 (2d2h ago)   2d7h\nkube-prometheus-stack-grafana-5c6cf88fd9-8wc9k              3/3     Running   3 (2d2h ago)   2d7h\nkube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d   1/1     Running   1 (2d2h ago)   2d7h\nkube-prometheus-stack-operator-c74ddccb5-8cprr              1/1     Running   1 (2d2h ago)   2d7h\nkube-prometheus-stack-prometheus-node-exporter-vd8lw        1/1     Running   1 (2d2h ago)   2d7h\nprometheus-kube-prometheus-stack-prometheus-0               2/2     Running   2 (2d2h ago)   2d7h\n
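
    As a quick check of the bundled Grafana UI, you can port-forward its Service; the Service name and port below assume the chart's default release naming and may differ if you customize the values:

    kubectl port-forward -n kube-prometheus-stack svc/kube-prometheus-stack-grafana 8080:80\n# Then browse to http://localhost:8080\n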
    "},{"location":"addons/metrics-server/","title":"Metrics Server","text":"

    Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.

    Metrics Server collects resource metrics from Kubelets and exposes them in Kubernetes apiserver through Metrics API for use by Horizontal Pod Autoscaler and Vertical Pod Autoscaler. Metrics API can also be accessed by kubectl top, making it easier to debug autoscaling pipelines.
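
    For example, once the add-on is running you can query the Metrics API directly:

    kubectl top nodes\nkubectl top pods -A\n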

    "},{"location":"addons/metrics-server/#usage","title":"Usage","text":"

    Metrics Server can be deployed by enabling the add-on via the following.

    enable_metrics_server = true\n

    You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.

      enable_metrics_server = true\n\n  metrics_server = {\n    name          = \"metrics-server\"\n    chart_version = \"3.10.0\"\n    repository    = \"https://kubernetes-sigs.github.io/metrics-server/\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify metrics-server pods are running.

    $ kubectl get pods -n kube-system\nNAME                                   READY   STATUS    RESTARTS       AGE\nmetrics-server-6f9cdd486c-njh8b        1/1     Running   1 (2d2h ago)   2d7h\n
    "},{"location":"addons/opa-gatekeeper/","title":"OPA Gatekeeper","text":"

    Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.

    For complete project documentation, please visit the Gatekeeper documentation. For reference constraint templates, refer to the Gatekeeper library.
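
    As an illustration (not something this add-on creates for you), with the K8sRequiredLabels example template from the Gatekeeper documentation installed, a constraint such as the sketch below denies namespaces that are missing an owner label; the constraint name and label key are hypothetical:

    kubectl apply -f - <<EOF\n---\napiVersion: constraints.gatekeeper.sh/v1beta1\nkind: K8sRequiredLabels\nmetadata:\n  name: ns-must-have-owner   # hypothetical\nspec:\n  match:\n    kinds:\n      - apiGroups: [\"\"]\n        kinds: [\"Namespace\"]\n  parameters:\n    labels: [\"owner\"]\nEOF\n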

    "},{"location":"addons/opa-gatekeeper/#usage","title":"Usage","text":"

    Gatekeeper can be deployed by enabling the add-on via the following.

    enable_gatekeeper = true\n

    You can also customize the Helm chart that deploys gatekeeper via the following configuration:

      enable_gatekeeper = true\n\n  gatekeeper = {\n    name          = \"gatekeeper\"\n    chart_version = \"3.12.0\"\n    repository    = \"https://open-policy-agent.github.io/gatekeeper/charts\"\n    namespace     = \"gatekeeper-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n
    "},{"location":"addons/secrets-store-csi-driver-provider-aws/","title":"AWS Secrets Manager and Config Provider for Secret Store CSI Driver","text":"

    AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.
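
    For illustration, a workload consumes a Secrets Manager secret through a SecretProviderClass and a CSI volume once the driver and AWS provider are installed; the secret, namespace, and pod names below are hypothetical, and the pod's service account still needs IAM permission to read the secret:

    kubectl apply -f - <<EOF\n---\napiVersion: secrets-store.csi.x-k8s.io/v1\nkind: SecretProviderClass\nmetadata:\n  name: db-creds                     # hypothetical\n  namespace: default\nspec:\n  provider: aws\n  parameters:\n    objects: |\n      - objectName: \"my-db-secret\"   # hypothetical Secrets Manager secret\n        objectType: \"secretsmanager\"\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: secrets-demo                 # hypothetical\n  namespace: default\nspec:\n  containers:\n    - name: app\n      image: public.ecr.aws/docker/library/busybox:latest\n      command: [\"sleep\", \"3600\"]\n      volumeMounts:\n        - name: secrets\n          mountPath: /mnt/secrets\n          readOnly: true\n  volumes:\n    - name: secrets\n      csi:\n        driver: secrets-store.csi.k8s.io\n        readOnly: true\n        volumeAttributes:\n          secretProviderClass: db-creds\nEOF\n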

    "},{"location":"addons/secrets-store-csi-driver-provider-aws/#usage","title":"Usage","text":"

    AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.

    enable_secrets_store_csi_driver              = true\nenable_secrets_store_csi_driver_provider_aws = true\n

    You can optionally customize the Helm chart via the following configuration.

      enable_secrets_store_csi_driver              = true\n  enable_secrets_store_csi_driver_provider_aws = true\n\n  secrets_store_csi_driver_provider_aws = {\n    name          = \"secrets-store-csi-driver\"\n    chart_version = \"0.3.2\"\n    repository    = \"https://aws.github.io/secrets-store-csi-driver-provider-aws\"\n    namespace     = \"kube-system\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n

    Verify secrets-store-csi-driver pods are running.

    $ kubectl get pods -n kube-system\nNAME                                         READY   STATUS    RESTARTS       AGE\nsecrets-store-csi-driver-9l2z8               3/3     Running   1 (2d5h ago)   2d9h\nsecrets-store-csi-driver-provider-aws-2qqkk  1/1     Running   1 (2d5h ago)   2d9h\n
    "},{"location":"addons/velero/","title":"Velero","text":"

    Velero is an open source tool to safely backup and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.

    • Helm chart
    • Plugin for AWS
    "},{"location":"addons/velero/#usage","title":"Usage","text":"

    Velero can be deployed by enabling the add-on via the following.

    enable_velero           = true\nvelero_backup_s3_bucket = \"<YOUR_BUCKET_NAME>\"\n\nvelero = {\n  s3_backup_location = \"<YOUR_S3_BUCKET_ARN>[/prefix]\"\n}\n

    You can also customize the Helm chart that deploys velero via the following configuration:

    enable_velero           = true\n\nvelero = {\n  name          = \"velero\"\n  description   = \"A Helm chart for velero\"\n  chart_version = \"3.1.6\"\n  repository    = \"https://vmware-tanzu.github.io/helm-charts/\"\n  namespace     = \"velero\"\n  values        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

    For a working example, see the stateful example blueprint.

    "},{"location":"addons/velero/#validate","title":"Validate","text":"
    1. Run update-kubeconfig command:
    aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>\n
    1. Test by listing velero resources provisioned:
    kubectl get all -n velero\n\n# Output should look similar to below\nNAME                         READY   STATUS    RESTARTS   AGE\npod/velero-7b8994d56-z89sl   1/1     Running   0          25h\n\nNAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE\nservice/velero   ClusterIP   172.20.20.118   <none>        8085/TCP   25h\n\nNAME                     READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/velero   1/1     1            1           25h\n\nNAME                               DESIRED   CURRENT   READY   AGE\nreplicaset.apps/velero-7b8994d56   1         1         1       25h\n
    1. Get backup location using velero CLI
    velero backup-location get\n\n# Output should look similar to below\nNAME      PROVIDER   BUCKET/PREFIX                                 PHASE       LAST VALIDATED                  ACCESS MODE   DEFAULT\ndefault   aws        stateful-20230503175301619800000005/backups   Available   2023-05-04 15:15:00 -0400 EDT   ReadWrite     true\n
    1. To demonstrate creating a backup and restoring, create a new namespace and run nginx in it using the commands below:
    kubectl create namespace backupdemo\nkubectl run nginx --image=nginx -n backupdemo\n
    1. Create a backup of this namespace using velero
    velero backup create backup1 --include-namespaces backupdemo\n\n# Output should look similar to below\nBackup request \"backup1\" submitted successfully.\nRun `velero backup describe backup1` or `velero backup logs backup1` for more details.\n
    1. Describe the backup to check the backup status
    velero backup describe backup1\n\n# Output should look similar to below\nName:         backup1\nNamespace:    velero\nLabels:       velero.io/storage-location=default\nAnnotations:  velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0\n              velero.io/source-cluster-k8s-major-version=1\n              velero.io/source-cluster-k8s-minor-version=26+\n\nPhase:  Completed\n\n\nNamespaces:\n  Included:  backupdemo\n  Excluded:  <none>\n\nResources:\n  Included:        *\n  Excluded:        <none>\n  Cluster-scoped:  auto\n\nLabel selector:  <none>\n\nStorage Location:  default\n\nVelero-Native Snapshot PVs:  auto\n\nTTL:  720h0m0s\n\nCSISnapshotTimeout:    10m0s\nItemOperationTimeout:  0s\n\nHooks:  <none>\n\nBackup Format Version:  1.1.0\n\nStarted:    2023-05-04 15:16:31 -0400 EDT\nCompleted:  2023-05-04 15:16:33 -0400 EDT\n\nExpiration:  2023-06-03 15:16:31 -0400 EDT\n\nTotal items to be backed up:  9\nItems backed up:              9\n\nVelero-Native Snapshots: <none included>\n
    1. Delete the namespace - this will be restored using the backup created
    kubectl delete namespace backupdemo\n
    1. Restore the namespace from your backup
    velero restore create --from-backup backup1\n
    1. Verify that the namespace is restored
    kubectl get all -n backupdemo\n\n# Output should look similar to below\nNAME        READY   STATUS    RESTARTS   AGE\npod/nginx   1/1     Running   0          21s\n
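
    Beyond one-off backups, Velero can also back the namespace up on a schedule; a minimal sketch (the schedule name and cron expression are arbitrary):

    velero schedule create nightly-backupdemo --schedule \"0 1 * * *\" --include-namespaces backupdemo\n\n# List schedules and the backups they produce\nvelero schedule get\nvelero backup get\n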
    "},{"location":"addons/vertical-pod-autoscaler/","title":"Vertical Pod Autoscaler","text":"

    Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help \"right size\" your applications. When configured, it will automatically request the necessary reservations based on usage and thus allow proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in the initial container configuration.

    NOTE: The Metrics Server add-on is a dependency for this add-on.
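
    For illustration, once the add-on is installed a workload opts in with a VerticalPodAutoscaler object such as the minimal sketch below; the target Deployment name is hypothetical:

    kubectl apply -f - <<EOF\n---\napiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: my-app-vpa           # hypothetical\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-app             # hypothetical Deployment\n  updatePolicy:\n    updateMode: \"Off\"        # recommendations only; switch to \"Auto\" to apply them\nEOF\n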

    "},{"location":"addons/vertical-pod-autoscaler/#usage","title":"Usage","text":"

    This step deploys the Vertical Pod Autoscaler with the default Helm chart configuration.

      enable_vpa            = true\n  enable_metrics_server = true\n

    You can also customize the Helm chart that deploys vpa via the following configuration:

      enable_vpa = true\n  enable_metrics_server = true\n\n  vpa = {\n    name          = \"vpa\"\n    chart_version = \"1.7.5\"\n    repository    = \"https://charts.fairwinds.com/stable\"\n    namespace     = \"vpa\"\n    values        = [templatefile(\"${path.module}/values.yaml\", {})]\n  }\n
    "}]}
\ No newline at end of file
diff --git a/main/sitemap.xml b/main/sitemap.xml
index c7f06945..e0ed9d4c 100644
--- a/main/sitemap.xml
+++ b/main/sitemap.xml
@@ -2,162 +2,162 @@
[sitemap.xml hunk flattened during extraction: for every page under https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/main/ (the site index, amazon-eks-addons, architectures, aws-partner-addons, helm-release, and each addons/* page), the lastmod value changes from 2024-03-25 to 2024-03-27; the daily changefreq entries are unchanged]
\ No newline at end of file
diff --git a/main/sitemap.xml.gz b/main/sitemap.xml.gz
index 645e694e8c3c9ba1f92df2c1df548a39ec8f0d52..c05605eb959b0e4230cff9bed6dd261a7823aeda 100644
GIT binary patch
[base85-encoded binary patch for the regenerated sitemap.xml.gz omitted (literal 557 -> 557 bytes)]