diff --git a/babel.config.js b/babel.config.js
deleted file mode 100644
index e00595dae7..0000000000
--- a/babel.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
-};
diff --git a/custom-dictionary.txt b/custom-dictionary.txt
index ae96dd0ae5..7911f890ff 100644
--- a/custom-dictionary.txt
+++ b/custom-dictionary.txt
@@ -60,3 +60,9 @@ minamijoyo
tfupdate
hcledit
self-hosting
+infrachanges
+Entra
+GLMU
+myprodsa
+azuread
+mysa
diff --git a/docs/2.0/docs/accountfactory/architecture/index.md b/docs/2.0/docs/accountfactory/architecture/index.md
index 37ddb2ed91..d3c94c9050 100644
--- a/docs/2.0/docs/accountfactory/architecture/index.md
+++ b/docs/2.0/docs/accountfactory/architecture/index.md
@@ -28,13 +28,14 @@ sequenceDiagram
Infra Live Repository ->> Pipelines: Trigger Account Added
Pipelines ->> Core Accounts: Execute terragrunt to baseline account
```
+
## IAM roles
Newly created accounts include IAM policies that define the scope of changes Pipelines is authorized to perform within AWS. Pipelines automatically assumes the necessary roles for each account when it detects changes. Detailed information about the provisioned roles can be found [here](/2.0/docs/pipelines/architecture/security-controls#roles-provisioned-by-devops-foundations).
## Delegated repositories
-Delegated repositories enhance the architecture of infrastructure management by introducing additional layers of access control. When delegated repositories are created, Pipelines continues to manage new account security baselines within the `infrastructure-live-root` repository, while other infrastructure resources are managed in a new repository specific to the delegated account(s).
+Delegated repositories enhance the architecture of infrastructure management by introducing additional layers of access control. When delegated repositories are created, Pipelines continues to manage new account security baselines within the `infrastructure-live-root` repository, while other infrastructure resources are managed in a new repository specific to the delegated account(s).
Pipelines uses IAM roles from the `infrastructure-live-access-control` repository to deploy infrastructure in these delegated repositories. This setup enables the central platform team to define and restrict the scope of changes individual teams can make via Pipelines in delegated repositories.
diff --git a/docs/2.0/docs/accountfactory/architecture/repository-topology.md b/docs/2.0/docs/accountfactory/architecture/repository-topology.md
new file mode 100644
index 0000000000..f282218b50
--- /dev/null
+++ b/docs/2.0/docs/accountfactory/architecture/repository-topology.md
@@ -0,0 +1,149 @@
+# Repository Topology
+
+Gruntwork Account Factory provides an opinionated (but flexible) repository structure that supports organizations as they scale their infrastructure management across multiple AWS accounts. This approach is designed to help teams graduate from struggling to manage a handful of accounts to conveniently managing hundreds, all while maintaining high standards for security, compliance, and developer productivity.
+
+The repository topology is designed around a core principle: **centralized governance with distributed ownership**. Your platform team maintains control over critical security and compliance infrastructure, while application teams get the autonomy they need to move fast within well-defined guardrails.
+
+Understanding this repository structure will help you leverage Account Factory effectively and set your organization up for sustainable growth.
+
+## `infrastructure-live-root`
+
+Think of `infrastructure-live-root` as your organization's infrastructure command center. This repository, built from the [infrastructure-live-root-template](https://github.com/gruntwork-io/infrastructure-live-root-template), is where your platform team manages the foundational elements that every other AWS account depends on, and where your Account Factory workflow lives.
+
+This is the only repository with access to the AWS management account, and it is trusted by IAM roles provisioned in all AWS accounts, so your platform team can provision whatever infrastructure is necessary to prepare those accounts for workloads. It is also where your platform team can provision new AWS accounts with consistent baselines whenever teams need them, and where you'll manage critical organization-wide infrastructure like your AWS Landing Zone, central logging, and security services.
+
+Access to this repository is intentionally restricted to your most trusted platform team members. Every other piece of infrastructure in your organization can trace back to the foundational services configured here.
+
+### `infrastructure-live-root` workflows
+
+- **Account Factory:** (GitHub only) This is your self-service account vending machine. When someone needs a new AWS account (e.g. for a new application, environment, or team), they trigger this workflow as the entrypoint to account vending. The workflow is triggered via [repository dispatch](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#repository_dispatch), a GitHub Actions feature that makes it possible to trigger the workflow from outside the repository using the GitHub API.
+
+ The workflow accepts a simple JSON payload (there's even a handy customizable HTML form included to make this easy) and creates a pull request with all the necessary infrastructure code to provision and baseline the new account.
+
+ :::tip
+
+ The included HTML form is just a starting point. Organizations typically customize this form to capture additional metadata like tags, cost center codes, or the conditional creation of potentially expensive security services like Macie or GuardDuty.
+
+ Once you have a good grasp of how the form works and how it generates the JSON payload, you can opt not to use the form at all and instead trigger the workflow using the GitHub API directly from internal platforms like ServiceNow or Jira (see the sketch following this list).
+
+ You can learn more about this in the ["Using the Account Factory Workflow" guide](/2.0/docs/accountfactory/guides/vend-aws-account).
+
+ :::
+
+- **Pipelines:** This is where Account Factory integrates with Gruntwork Pipelines to drive infrastructure changes via GitOps workflows. With Gruntwork Pipelines, every infrastructure change goes through a proper review process with pull requests, approvals, and controlled deployments. Your platform team gets the confidence of peer review while maintaining the ability to rapidly deploy critical infrastructure changes.
+
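+Since the workflow is triggered via `repository_dispatch`, kicking off account vending from outside GitHub is just an authenticated POST to the GitHub API. A rough sketch follows; the event type and payload fields here are hypothetical, so match them to what your form actually generates:
+
+```json
+{
+  "event_type": "account-requested",
+  "client_payload": {
+    "account_name": "my-new-team-dev",
+    "account_email": "aws+my-new-team-dev@example.com",
+    "cost_center": "cc-1234"
+  }
+}
+```
+
+Sending a body like this to `POST /repos/<your-org>/infrastructure-live-root/dispatches`, using a token with access to the repository, is all an internal platform like ServiceNow or Jira needs to do.
+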
+:::tip
+
+While you can rename `infrastructure-live-root` during setup, keeping the name consistent with our documentation makes life easier for your team. You also *can* create multiple root repositories for complex organizational structures, but be sure that it's worth the additional complexity for your organization. It can be a significant source of operational overhead, and you might be better off delegating some infrastructure management to a separate repository with a [delegated infrastructure-live repository](#infrastructure-live-delegated).
+
+:::
+
+## `infrastructure-live-access-control`
+
+This is where you solve one of the biggest challenges in scaling infrastructure management: **How do you give teams the access they need (and only the access they need) to manage their own infrastructure?**
+
+:::tip
+
+This repository is optional for most users (Enterprise customers must provision it for delegated repository access control), but it is a highly recommended best practice for all customers.
+
+:::
+
+The `infrastructure-live-access-control` repository is your organization's permission control center. It manages all the IAM roles, policies, and permissions that determine what each team can do in their AWS accounts outside of the `infrastructure-live-root` repository. It provides a central place where application engineers and the platform team can collaborate to define and iterate on the access control policies for roles that can be assumed by [delegated infrastructure-live repositories](#infrastructure-live-delegated).
+
+Your application teams can _request_ the access they need here through pull requests, but your platform team maintains oversight by reviewing and approving these changes, and branch protection rules can ensure that they have final say in the approval process. No more bottlenecks where platform teams have to manually create every single IAM policy (and determine the appropriate level of access for each team), and no more security risks from teams having overly broad permissions.
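+
+For example, pairing branch protection with even a one-line CODEOWNERS file (the org and team names below are hypothetical) is enough to give the platform team that final say:
+
+```text
+# Require platform-team approval on every change in this repository.
+* @acme/platform-team
+```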
+
+:::info
+
+**Delegated infrastructure management** is the practice of allowing developers to manage infrastructure in a self-service fashion.
+
+Instead of having your platform team manually provision every resource required for your entire organization (which doesn't always scale), you can let application teams manage their own infrastructure within clearly defined boundaries. Your platform team still entirely controls critical resources (e.g. AWS accounts, VPCs, security policies) but developers can deploy and update their applications without opening a ticket and waiting on the platform team.
+
+Most organizations find success with a hybrid approach: centralized control for anything that affects security or compliance, delegated management for everything else. Where you draw that line depends on your risk tolerance, team maturity, and the complexity of your organization.
+
+:::
+
+:::tip
+
+You can name the `infrastructure-live-access-control` repository whatever makes sense for your organization. Just keep it descriptive so future team members (and your future self) know exactly what it does. If you can keep the name close to `infrastructure-live-access-control`, you probably should, so your team can easily map it to what they read in the Gruntwork documentation.
+
+While you *could* split access control across multiple repositories for very large organizations, remember that multiple sources of truth for permissions can quickly become a security and operational nightmare. Start with one repository and only consider splitting if you have a compelling organizational reason.
+
+:::
+
+### `infrastructure-live-access-control` workflows
+
+- **Pipelines:** Every permission change goes through the same GitOps workflow as your other infrastructure. When someone proposes new IAM policies or role changes, the workflow runs a plan to show exactly what will change. Once approved and merged, it automatically applies those changes across your AWS accounts.
+
+ This means your access control changes are auditable, reversible, and follow the same quality gates as the rest of your infrastructure. No more wondering who changed what permissions or scrambling to fix a misconfigured IAM policy.
+
+## `infrastructure-catalog`
+
+The `infrastructure-catalog` repository is your organization's internal infrastructure component library. This is where you build and maintain the custom Terragrunt/OpenTofu/Terraform resources that are specific to your organization's needs and standards, and can be reused throughout your organization.
+
+While Gruntwork provides battle-tested OpenTofu/Terraform modules for common infrastructure patterns, every organization has unique requirements. Maybe you need a special monitoring setup, custom networking configurations, or specific compliance controls. This repository is where those organization-specific modules live, get tested, and evolve alongside your infrastructure needs.
+
+The result? Instead of every team reinventing the wheel, they can leverage proven, tested components that follow your organization's best practices, and your engineers can work together to learn from each other and build on each other's work.
+
+:::tip
+
+Starting with a single `infrastructure-catalog` repository makes discoverability much easier (e.g. your teams won't have to guess where to find the standardized "database" module that follows your organization's best practices for security and cost savings). You can always split it later if your organization grows large enough that centralized module management becomes unwieldy. This can happen if your central catalog starts to receive so many git tag updates that it becomes difficult to determine whether a version bump in a module is a breaking change.
+
+Some large organizations also benefit from separate module repositories for different domains (security modules, application modules, etc.) or business units. Just make sure the benefits outweigh the complexity of managing multiple sources of truth.
+
+:::
+
+### `infrastructure-catalog` workflows
+
+- **Tests:** This is where your team validates that your reusable infrastructure patterns are, in fact, reliably reproducible. Every module gets automatically tested by spinning up real AWS resources, running comprehensive tests with [Terratest](https://terratest.gruntwork.io/), and cleaning everything up after the tests run. This means your teams can trust that modules actually work consistently before using them in production. It also gives you a sandbox for ephemeral infrastructure where you can try experimental changes to your patterns before committing to running them long-term in production. A sketch of such a test follows below.
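+
+For illustration, a minimal Terratest case might look like the following (the module path and output name are hypothetical; adapt them to your catalog's layout):
+
+```go
+package test
+
+import (
+    "testing"
+
+    "github.com/gruntwork-io/terratest/modules/terraform"
+)
+
+// TestDatabaseModule provisions real AWS resources from a hypothetical
+// module in this repository, checks an output, and tears everything down.
+func TestDatabaseModule(t *testing.T) {
+    t.Parallel()
+
+    opts := &terraform.Options{
+        // Hypothetical path to a module in your infrastructure-catalog.
+        TerraformDir: "../modules/database",
+    }
+
+    // Always clean up, even if assertions fail.
+    defer terraform.Destroy(t, opts)
+
+    terraform.InitAndApply(t, opts)
+
+    // Assert on a hypothetical output exposed by the module.
+    if endpoint := terraform.Output(t, opts, "endpoint"); endpoint == "" {
+        t.Fatal("expected a non-empty 'endpoint' output")
+    }
+}
+```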
+
+## `infrastructure-live-delegated`
+
+This is where you empower teams outside your central platform team to manage their own infrastructure. **Delegated repositories** are how your organization grows from a small platform team managing everything to hundreds of developers deploying infrastructure independently, all while maintaining security and compliance best practices.
+
+:::tip
+
+Typical use cases for delegated repositories include:
+
+- Allowing a separate team to independently manage infrastructure relevant to a specific account (e.g. a mobile app team to manage their own database and application infrastructure).
+- Enabling a GitHub Actions workflow in a repository to make restricted changes to infrastructure in a specific account (e.g. a repository with application code may need to build and push a container image to AWS ECR before it's picked up by ArgoCD in the cluster).
+- Allowing a repository's GitHub Actions workflows to have read-only access to select resources within a specific account (e.g. a data science team may need to be granted read-only access to an S3 bucket in an AWS account to run their ML pipelines against real production data).
+
+:::
+
+These repositories represent individual teams or applications that have been granted specific, limited permissions to manage their own infrastructure. Think of them as specialized workshops where each team has exactly the tools they need for their job, but can't accidentally (or intentionally) mess with anyone else's work.
+
+The permissions for each delegated repository are carefully controlled by your `infrastructure-live-access-control` repository. Maybe the mobile app team needs to deploy containers and manage their databases, while the data science team needs different permissions for their ML pipelines. Each team gets exactly what they need, no more and no less.
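+
+As a concrete illustration of how narrow these grants can be, a delegated role supporting the container-image use case above might carry only a policy like this (the region, account ID, and repository name are hypothetical):
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowEcrLogin",
+      "Effect": "Allow",
+      "Action": "ecr:GetAuthorizationToken",
+      "Resource": "*"
+    },
+    {
+      "Sid": "AllowPushToOneRepository",
+      "Effect": "Allow",
+      "Action": [
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:InitiateLayerUpload",
+        "ecr:UploadLayerPart",
+        "ecr:CompleteLayerUpload",
+        "ecr:PutImage"
+      ],
+      "Resource": "arn:aws:ecr:us-east-1:123456789012:repository/my-app"
+    }
+  ]
+}
+```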
+
+For Enterprise customers, Account Factory can even automatically create these delegated repositories as part of the account vending process. Request a new AWS account (or set of AWS accounts) for your team, and you automatically get a corresponding repository with all the right permissions to manage infrastructure in those account(s).
+
+## How it all fits together
+
+Here's how these repositories work together to create a scalable, secure infrastructure management system:
+
+```mermaid
+erDiagram
+ infra-live-root ||--o| infra-live-access-control : "Delegated Access Control"
+ infra-live-access-control ||--o{ infra-live-delegated : "Delegated Infrastructure Management"
+ infra-live-root ||--o{ infra-live-delegated : "Vended (Enterprise)"
+ infra-live-root ||--o| infra-catalog : ""
+ infra-live-access-control ||--o| infra-catalog : ""
+ infra-live-delegated }o--o| infra-catalog : ""
+```
+
+:::note
+
+We've abbreviated `infrastructure` as `infra` in the diagram for brevity.
+
+:::
+
+**The flow in practice:**
+
+1. **Foundations first:** Your `infrastructure-live-root` repository sets up the foundational AWS infrastructure that everything else depends on: accounts, networking, security services.
+
+2. **Shared components:** The `infrastructure-catalog` provides reusable, tested modules that any team can use, ensuring consistency and reducing duplicate work across your organization.
+
+3. **Permissions next:** The `infrastructure-live-access-control` repository defines who can do what in each AWS account, creating the guardrails that keep your infrastructure secure as it scales.
+
+4. **Teams get autonomy:** Individual `infrastructure-live-delegated` repositories give teams the ability to manage their own infrastructure within the boundaries set by access control policies.
+
+This topology grows with you: start simple with just the root repository, build out shared components in your catalog, add access control as you scale, and introduce delegated repositories as teams need more autonomy.
diff --git a/docs/2.0/docs/accountfactory/architecture/security-controls.md b/docs/2.0/docs/accountfactory/architecture/security-controls.md
new file mode 100644
index 0000000000..6e584f5c45
--- /dev/null
+++ b/docs/2.0/docs/accountfactory/architecture/security-controls.md
@@ -0,0 +1,250 @@
+# Security Controls
+
+Gruntwork Account Factory employs a defense-in-depth approach to secure workflows across both GitHub and GitLab platforms. This document outlines the controls Account Factory uses to ensure that only infrastructure written in code and approved by a reviewer can be deployed to your AWS accounts.
+
+Account Factory relies on Pipelines to drive infrastructure changes via GitOps workflows, so make sure to read the [Pipelines security controls](/2.0/docs/pipelines/architecture/security-controls) for more details on how Pipelines secures workflows.
+
+## Least privilege principle
+
+Account Factory adheres to the principle of least privilege: the AWS IAM roles vended during Account Factory onboarding are configured to grant only the permissions Account Factory needs to operate correctly, and to trust only the `infrastructure-live-root` repository for role assumption.
+
+By default, the only repository/group required to interact with infrastructure using Pipelines in Account Factory is the `infrastructure-live-root` repository/group. This contains the Infrastructure as Code for `management`, `logs`, `security`, and `shared` accounts. Access should be limited to a small, trusted group responsible for defining critical infrastructure, similar to the role of the `root` user in Unix systems.
+
+The roles used by the `infrastructure-live-root` repository are divided by responsibility:
+
+- Plan roles: Every account is provisioned with an AWS IAM role, with read-only permissions to the resources in that account, that `infrastructure-live-root` is trusted to assume from any branch.
+- Apply roles: Every account is provisioned with an AWS IAM role, with read-write permissions to the resources in that account, that `infrastructure-live-root` is trusted to assume on the deploy branch (e.g. `main`).
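+
+For example, where a plan role's trust policy matches any ref, an apply role's `sub` condition is pinned to the deploy branch. A sketch for GitHub, mirroring the full example later on this page (the account ID and org name are placeholders):
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::0123456789012:oidc-provider/token.actions.githubusercontent.com"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "token.actions.githubusercontent.com:sub": "repo:acme/infrastructure-live-root:ref:refs/heads/main"
+        }
+      }
+    }
+  ]
+}
+```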
+
+## Platform-specific access controls
+
+import Tabs from "@theme/Tabs"
+import TabItem from "@theme/TabItem"
+
+<Tabs>
+<TabItem value="github" label="GitHub">
+
+- The AWS IAM role assumed via OIDC when pull requests are opened or updated has a trust policy that restricts access to the repository itself and provides read-only permissions
+- The AWS IAM role assumed via OIDC when pull requests are merged into the `main` branch has a trust policy limiting access to the repository's `main` branch and granting write permissions
+- Branch protection rules can be configured to require reviews and status checks
+- GitHub App or machine user authentication options available
+
+</TabItem>
+<TabItem value="gitlab" label="GitLab">
+
+- The AWS IAM role assumed via OIDC when merge requests are opened or updated has a trust policy that restricts access to the group itself and provides read-only permissions
+- The AWS IAM role assumed via OIDC when merge requests are merged into the `main` branch has a trust policy limiting access to the group's `main` branch and granting write permissions
+- Protected branches can be configured to require approvals and pipeline success
+- Machine user authentication required with group-level access configuration
+
+</TabItem>
+</Tabs>
+
+## Infrastructure access control
+
+An optional `infrastructure-live-access-control` repository/group can manage access control for infrastructure provisioned in AWS accounts. Using this is a best practice for centralized and auditable access management.
+
+- Access to the `main` branch should be restricted to a small, trusted group managing infrastructure access
+- The same least privilege principles apply: roles assumed for pull/merge requests have read-only permissions, while roles for merged changes have write permissions
+
+Unlike the `infrastructure-live-root` repository, this repository focuses on managing access control rather than defining infrastructure. You might grant write access to a broader group for managing access while maintaining tight control over the main branch. Encourage collaboration between platform teams and application engineers to review and refine access control continuously.
+
+## CI/CD token strategy
+
+<Tabs>
+<TabItem value="github" label="GitHub">
+
+There are two ways GitHub users can configure Source Control Management (SCM) authentication for Account Factory:
+
+- The [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io)
+- [GitHub Machine Users](/2.0/docs/pipelines/installation/viamachineusers)
+
+In general, we recommend using the Gruntwork.io GitHub App when possible, as it provides a more feature-rich, reliable and secure experience.
+
+Reasons you might need to set up Machine Users include:
+
+- Your organization does not allow installation of third-party GitHub apps.
+- You are using Self-hosted GitHub Enterprise, and cannot use third-party GitHub apps due to your server settings.
+- You are using a different SCM platform (e.g. GitLab).
+- You want a fallback mechanism in case the Gruntwork.io GitHub App is temporarily unavailable.
+
+### GitHub App installation strategy (recommended)
+
+No long-lived tokens are stored when using the Gruntwork.io GitHub App. Instead, short-lived tokens are generated on-demand at runtime for authentication with GitHub.
+
+### Machine users installation strategy
+
+This strategy requires the following tokens to be created:
+
+- `PIPELINES_READ_TOKEN`: Classic PAT with read access to required repositories
+- `INFRA_ROOT_WRITE_TOKEN`: Fine-grained PAT with read/write access to infrastructure repositories
+- `ORG_REPO_ADMIN_TOKEN`: Fine-grained PAT with admin access for repository management
+
+See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) for more details.
+
+</TabItem>
+<TabItem value="gitlab" label="GitLab">
+
+GitLab requires the following tokens to be created:
+
+- `PIPELINES_GITLAB_TOKEN`: A GitLab access token with `api` scope
+- `PIPELINES_GITLAB_READ_TOKEN`: A GitLab access token with `read_repository` scope
+
+See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) for more details.
+
+Pipelines will also require access to Gruntwork's GitHub repositories; however, those tokens are generated at runtime via the Gruntwork Management Portal.
+
+</TabItem>
+</Tabs>
+
+## AWS credentials
+
+Pipelines requires IAM roles configured with trust policies to use OpenID Connect (OIDC) with your CI/CD platform. This eliminates the need for long-lived AWS credentials stored as secrets.
+
+### OpenID Connect configuration
+
+Pipelines provisions an OpenID Connect identity provider in AWS IAM for each account, setting GitHub/GitLab as the provider and restricting the audience to AWS STS and your GitHub/GitLab organization. The Pipelines IAM role's trust policy ensures:
+
+- Only a single repository in your GitHub/GitLab organization can assume the role for plans.
+- Only a single branch can assume the role for applies/destroys.
+
+For more details, see the [official AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html). Below is an example of a trust policy used by Pipelines.
+
+<Tabs>
+<TabItem value="github" label="GitHub">
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::0123456789012:oidc-provider/token.actions.githubusercontent.com"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringLike": {
+ "token.actions.githubusercontent.com:sub": "repo:acme/infrastructure-live-root:ref:*"
+ }
+ }
+ }
+ ]
+}
+```
+
+</TabItem>
+<TabItem value="gitlab" label="GitLab">
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::0123456789012:oidc-provider/gitlab.com"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringLike": {
+ "gitlab.com:sub": "project_path:acme/projectprefix*:*"
+ }
+ }
+ }
+ ]
+}
+```
+
+</TabItem>
+</Tabs>
+
+Refer to [Configuring OpenID Connect in Amazon Web Services](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) for additional details.
+
+### Roles provisioned by Account Factory
+
+Pipelines automatically provisions specific roles in AWS accounts to support required infrastructure operations. These roles follow the naming pattern `<repository>-pipelines-<action>`, where the first segment identifies the trusting repository and the final segment is `plan` or `apply`.
+
+For example:
+
+- The `root-pipelines-plan` role is used by Pipelines to plan changes in the `infrastructure-live-root` repository.
+
+These roles are designed to operate in a single repository and include a trust policy that only permits GitHub Actions workflows triggered by that repository to assume the role. Roles are provisioned in pairs:
+
+- `plan` roles, with read-only permissions, are used to execute Terragrunt plans for open pull requests.
+- `apply` roles, with read/write permissions, are used to apply or destroy infrastructure changes for merged pull requests or direct pushes to the deploy branch (commonly `main`).
+
+This separation ensures that controls like [branch protection](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) and [CODEOWNERS files](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) can effectively govern infrastructure changes.
+
+#### `root-pipelines-plan`
+
+A read-only plan role for the `infrastructure-live-root` repository.
+
+- This role is one of the first created when setting up Account Factory. It is provisioned manually by the customer during the platform setup process.
+- It exists in all accounts and handles tasks necessary for setting up AWS accounts.
+- Among read-only roles, these are highly permissive, as they manage foundational AWS account setup.
+
+#### `root-pipelines-apply`
+
+A read/write apply role for the `infrastructure-live-root` repository.
+
+- Like the plan role, this is one of the initial roles created during setup.
+- It is broadly permissive to support foundational account setups and bootstrapping.
+
+#### `access-control-pipelines-plan`
+
+A read-only plan role for the `infrastructure-live-access-control` repository.
+
+- These roles are provisioned for new accounts but are not included in core accounts such as `management`, `logs`, `security`, or `shared`.
+- They manage IAM roles and policies for vended accounts, facilitating infrastructure access control.
+
+#### `access-control-pipelines-apply`
+
+A read/write apply role for the `infrastructure-live-access-control` repository.
+
+- Similar to the plan role, these roles are provisioned for vended accounts but excluded from core accounts.
+- They have permissions to manage IAM roles and policies for the accounts where they are provisioned.
+
+#### `delegated-pipelines-plan`
+
+A read-only plan role for delegated repositories, used by Pipelines Enterprise customers.
+
+- These roles are pre-configured to have minimal permissions, primarily for managing OpenTofu/Terraform state.
+- A pull request opened in `infrastructure-live-access-control` during provisioning includes documentation for adding additional permissions if necessary.
+- Users should ensure that only the necessary _read-only_ permissions are granted for the specific delegated repository.
+
+:::note
+
+These roles have almost no permissions by default. They are pre-configured to have access only to OpenTofu/Terraform state, and the pull requests opened to provision them include documentation on how to add additional permissions as appropriate.
+
+It is up to the user provisioning these roles to ensure that they have only the necessary _read-only_ permissions required to manage infrastructure changes relevant to the delegated repository.
+
+:::
+
+#### `delegated-pipelines-apply`
+
+A read/write apply role for delegated repositories.
+
+- Similar to the plan role, these roles are pre-configured with minimal permissions and are intended for managing OpenTofu/Terraform state.
+- A pull request opened in `infrastructure-live-access-control` during provisioning includes documentation for adding additional permissions if necessary.
+- Users must ensure that the role has only the necessary _read/write_ permissions required for the delegated repository.
+
+:::note
+The `delegated-pipelines-plan` and `delegated-pipelines-apply` roles are automatically provisioned for new delegated accounts. Enterprise customers will see pull requests created in the `infrastructure-live-access-control` repository to vend these roles with proper configurations.
+:::
+
+## Trust boundaries
+
+A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.).
+
+Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control.
+
+:::tip
+
+Each AWS IAM role provisioned through setup of [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview) is configured to trust a single repository (and, for apply roles, a single branch). If a role's permissions become overly broad, consider creating a new role with more granular permissions tailored to the specific use case. Use the `infrastructure-live-access-control` repository to define and manage these roles.
+
+:::
diff --git a/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx b/docs/2.0/docs/accountfactory/guides/setup-delegated-repo.mdx
similarity index 99%
rename from docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx
rename to docs/2.0/docs/accountfactory/guides/setup-delegated-repo.mdx
index e68dd8fc26..1f25ee7a27 100644
--- a/docs/2.0/docs/pipelines/guides/setup-delegated-repo.mdx
+++ b/docs/2.0/docs/accountfactory/guides/setup-delegated-repo.mdx
@@ -19,7 +19,7 @@ Delegating infrastructure management might be necessary for reasons such as:
For example, a repository with application code may need to build and push a container image to AWS ECR before deploying it to a Kubernetes cluster.
-The following guide assumes you have completed the [Pipelines Setup & Installation](/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md).
+The following guide assumes you have completed the [Pipelines Setup & Installation](/2.0/docs/accountfactory/prerequisites/awslandingzone).
## Step 1 - Verify the delegated account setup
diff --git a/docs/2.0/docs/accountfactory/guides/vend-aws-account.md b/docs/2.0/docs/accountfactory/guides/vend-aws-account.md
index 61f2d0bc71..204a60cc6d 100644
--- a/docs/2.0/docs/accountfactory/guides/vend-aws-account.md
+++ b/docs/2.0/docs/accountfactory/guides/vend-aws-account.md
@@ -1,37 +1,72 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
# Using the Account Factory Workflow
-## Introduction
+## Generate the account-request file
+
+There are currently two ways to generate the account-request file:
+
+1. Using the [Gruntwork Developer Portal](/2.0/docs/accountfactory/guides/vend-aws-account?account-creation-method=ui#using-the-gruntwork-developer-portal) (**GitLab and GitHub non-enterprise customers**)
+2. Using the [Account Factory workflow in your repository](/2.0/docs/accountfactory/guides/vend-aws-account?account-creation-method=workflow#using-the-account-factory-workflow-in-your-repository) (**GitHub only**)
+
+<Tabs queryString="account-creation-method">
+<TabItem value="ui" label="Gruntwork Developer Portal">
+
+### Using the Gruntwork Developer Portal
+
+1. Navigate to the [Account Request](https://app.gruntwork.io/account-factory/request-generator) page in the Gruntwork Developer Portal, as an authenticated user, to access the request generator UI.
+
+ 
+
+1. If the form is disabled, ask an Admin in your Gruntwork Developer Portal account to configure the Account Factory settings.
+1. Fill out the form with the required information and click on the "Generate Account Request" button.
+1. Use the "Download" button to download the account-request file, or the "Copy" button to copy its contents to your clipboard.
+1. Navigate to your repository and create a new branch.
+1. Add the file to the `_new-account-requests` directory, either by moving the downloaded file there or by creating a new file with the contents of your clipboard. If copying content, ensure the file is created with the exact name displayed in the generator output.
+1. Commit your changes and open a Pull Request to the main branch.
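+
+For orientation, the generated account-request file is a small YAML document along these lines (a purely hypothetical sketch; the generator's output defines the real file name and fields, so use it verbatim):
+
+```yaml
+# _new-account-requests/account-my-new-team-dev.yml -- hypothetical example;
+# always use the exact file name and fields emitted by the generator.
+account_name: my-new-team-dev
+account_email: aws+my-new-team-dev@example.com
+organization_unit: workloads
+tags:
+  team: my-new-team
+```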
+
+</TabItem>
+<TabItem value="workflow" label="Account Factory workflow">
+
+### Using the Account Factory workflow in your repository
+
+:::info
+Only available for GitHub customers.
+:::
The Account Factory Workflow in your `infrastructure-live-root` repository can be used to create new AWS accounts. It requires a single input—a JSON payload—generated from the `account-factory-inputs.html` web page.
The JSON payload approach provides greater flexibility for account vending, overcoming the GitHub workflow restriction of a 10-input maximum.
+#### Step 1 - Download the file
+
:::note
This guide focuses on non-delegated repositories. Enterprise customers can also [use Account Factory to create new Delegated Repositories](/2.0/docs/accountfactory/guides/delegated-repositories).
:::
-
-### Step 1 - Download the file
-
Locate the inputs web page in your `infrastructure-live-root` repository at `.github/workflows/account-factory-inputs.html` and download it to your local machine.
-### Step 2 - Populate the values
+#### Step 2 - Populate the values
Open the downloaded `account-factory-inputs.html` file in a web browser and populate the input fields as required.
Once all values are filled, click "Generate" and copy the resulting JSON output to your clipboard.
-### Step 3 - Run the Account Factory workflow
+#### Step 3 - Run the Account Factory workflow
Access the Actions tab in your `infrastructure-live-root` repository on GitHub and select `Account factory` from the left-hand pane.
Click "Run workflow" on the right, paste the generated JSON payload into the dropdown, and click the green "Run workflow" button to initiate the workflow.
+After the workflow is complete, a new Pull Request will be created in the `infrastructure-live-root` repository. This PR will add an account request to the `_new-account-requests` directory.
-### Step 4 - Merge the account request PR
+
+</TabItem>
+</Tabs>
+
-After the workflow is complete, a new Pull Request will be created in the `infrastructure-live-root` repository. This PR will add an account request to the `_new-account-requests` directory.
+## Review and merge the account request PR
Review and merge the Pull Request to begin the account creation process.
@@ -43,7 +78,7 @@ Once the account request PR merges into the main branch, Pipelines will initiate
When the account is successfully created, Pipelines will open another Pull Request to baseline the account.
-### Step 5 - Merge the Account Baseline PR
+## Review and merge the Account Baseline PR
Review and merge the Account Baseline Pull Request. This PR contains essential infrastructure for enabling your delegated repository to plan and apply infrastructure changes in AWS. It also includes account baselines and configured account-specific infrastructure, such as a VPC.
diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.md b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md
similarity index 86%
rename from docs/2.0/docs/pipelines/installation/addingnewrepo.md
rename to docs/2.0/docs/accountfactory/installation/addingnewrepo.md
index 9001969540..7ea6610b80 100644
--- a/docs/2.0/docs/pipelines/installation/addingnewrepo.md
+++ b/docs/2.0/docs/accountfactory/installation/addingnewrepo.md
@@ -1,6 +1,6 @@
-# Initial Setup
+# Adding Account Factory to a new repository
-To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps:
+To configure Gruntwork Account Factory in a new GitHub repository, the following steps are required (and will be explained in detail below):
1. Create your `infrastructure-live-root` repository using Gruntwork's GitHub template.
2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live-root` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets.
@@ -23,7 +23,7 @@ Navigate to the template repository and select **Use this template** -> **Create
Use the Gruntwork.io GitHub App to [add the repository as an Infra Root repository](/2.0/docs/pipelines/installation/viagithubapp#configuration).
-If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers.md), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret.
+If using the [machine user model](/2.0/docs/pipelines/installation/viamachineusers), ensure the `INFRA_ROOT_WRITE_TOKEN` (and `ORG_REPO_ADMIN_TOKEN` for enterprise customers) is added to the repository as a secret or configured as an organization secret.
## Updating the Bootstrap Workflow
@@ -47,5 +47,6 @@ Each of your repositories will contain a Bootstrap Pull Request. Follow the inst
:::info
-The bootstrapping pull requests include pre-configured files, such as a `mise.toml` file that specifies versions of OpenTofu and Terragrunt. Ensure you review and update these configurations to align with your organization's requirements.
+The bootstrapping pull requests include pre-configured files, such as a `.mise.toml` file that specifies versions of OpenTofu and Terragrunt. Ensure you review and update these configurations to align with your organization's requirements.
+
:::
diff --git a/docs/2.0/docs/accountfactory/installation/index.md b/docs/2.0/docs/accountfactory/installation/index.md
index 11a240e3b7..f67e6fafd1 100644
--- a/docs/2.0/docs/accountfactory/installation/index.md
+++ b/docs/2.0/docs/accountfactory/installation/index.md
@@ -2,23 +2,20 @@
## Overview
-Account Factory is automatically integrated into [new Pipelines root repositories](/2.0/docs/pipelines/installation/addingnewrepo) during the bootstrapping process.
+Account Factory is automatically integrated into [new Pipelines root repositories](/2.0/docs/accountfactory/installation/addingnewrepo) during the bootstrapping process.
By default, Account Factory includes the following components:
-- 📋 An HTML form for generating workflow inputs: `.github/workflows/account-factory-inputs.html`
-
-- 🏭 A workflow for generating new requests: `.github/workflows/account-factory.yml`
-
-- 🗃️ A root directory for tracking account requests: `_new-account-requests`
-
-- ⚙️ A YAML file for tracking account names and IDs: `accounts.yml`
+- An HTML form for generating workflow inputs: `.github/workflows/account-factory-inputs.html`
+
+- A workflow for generating new requests: `.github/workflows/account-factory.yml`
+
+- A root directory for tracking account requests: `_new-account-requests`
+
+- A YAML file for tracking account names and IDs: `accounts.yml`
For detailed instructions on using these components, refer to the [Vending a New AWS Account Guide](/2.0/docs/accountfactory/guides/vend-aws-account).
## Configuring account factory
Account Factory is fully operational for vending new accounts without requiring any configuration changes. However, a [comprehensive reference for all configuration options is available here](/2.0/reference/accountfactory/configurations), allowing you to customize values and templates for generating Infrastructure as Code (IaC) for new accounts.
-
-
-
diff --git a/docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md b/docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
similarity index 97%
rename from docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md
rename to docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
index 397d5ba301..073482cd19 100644
--- a/docs/2.0/docs/pipelines/installation/prerequisites/awslandingzone.md
+++ b/docs/2.0/docs/accountfactory/prerequisites/awslandingzone.md
@@ -1,11 +1,10 @@
import CustomizableValue from '/src/components/CustomizableValue';
-
# Landing Zone
## Overview
-The Landing Zone component establishes an initial best-practice AWS multi-account setup.
+The Landing Zone component establishes an initial best-practice AWS multi-account setup for use with Gruntwork Account Factory.
## Extending AWS Control Tower
@@ -242,16 +241,15 @@ Complete the following steps to prepare for Gruntwork Account Factory:
3. Switch to the `Users` tab, select your management user from the list and click **Next**
- 4. Select `AWSAdministratorAccess` from the list of Permission Sets, then click **Next**
+ 4. Select `AWSAdministratorAccess` from the list of Permission Sets, then click **Next**
- 5. Click `Submit` to finish assigning access to your user
+ 5. Click `Submit` to finish assigning access to your user
## Next steps
Now that Control Tower is configured, consider these next steps:
+
- [Set up IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/get-started-choose-identity-source.html) for access control.
- [Apply required controls or SCPs](https://docs.aws.amazon.com/controltower/latest/userguide/controls.html).
- [Install Gruntwork Pipelines](/2.0/docs/pipelines/installation/viagithubapp).
- [Set up Gruntwork Account Factory](/2.0/docs/accountfactory/installation).
-
-
diff --git a/docs/2.0/docs/overview/getting-started/index.md b/docs/2.0/docs/overview/getting-started/index.md
index 73113153d7..4d2a3fb34e 100644
--- a/docs/2.0/docs/overview/getting-started/index.md
+++ b/docs/2.0/docs/overview/getting-started/index.md
@@ -7,7 +7,7 @@ Create your Gruntwork account and invite your team members to access Gruntwork r
-### Step 2: [Set up a Landing Zone](/2.0/docs/pipelines/installation/prerequisites/awslandingzone)
+### Step 2: [Set up a Landing Zone](/2.0/docs/accountfactory/prerequisites/awslandingzone)
Follow Gruntwork's AWS Landing Zone walkthrough to implement a best-practice multi-account setup, ready for use with DevOps Foundations.
@@ -19,9 +19,15 @@ Set up authentication for Pipelines to enable secure automation of infrastructur
-### Step 4: [Create new Pipelines repositories](/2.0/docs/pipelines/installation/addingnewrepo)
+### Step 4: Create new Pipelines repositories
-Alternatively, you can [add Pipelines to an existing repository](/2.0/docs/pipelines/installation/addingexistingrepo).
+- [New GitHub repository](/2.0/docs/pipelines/installation/addingnewrepo)
+- [New GitLab repository](/2.0/docs/pipelines/installation/addingnewgitlabrepo)
+
+Alternatively, you can add Pipelines to an existing repository:
+
+- [Existing GitHub repository](/2.0/docs/pipelines/installation/addingexistingrepo)
+- [Existing GitLab repository](/2.0/docs/pipelines/installation/addinggitlabrepo)
diff --git a/docs/2.0/docs/pipelines/architecture/audit-logs.md b/docs/2.0/docs/pipelines/architecture/audit-logs.md
index 3fc94094b4..264ad474e4 100644
--- a/docs/2.0/docs/pipelines/architecture/audit-logs.md
+++ b/docs/2.0/docs/pipelines/architecture/audit-logs.md
@@ -1,8 +1,11 @@
# Audit Logs
-Gruntwork Pipelines provides an audit log that records which user performed specific operations in your AWS accounts as a result of a [Pipelines Action](/2.0/docs/pipelines/architecture/actions.md).
+For certain cloud environments (for now, only AWS), Gruntwork Pipelines provides an audit log that records which user performed specific operations in your AWS accounts as a result of a [Pipelines Action](/2.0/docs/pipelines/architecture/actions.md). Pipelines does this via integration with native tooling for the cloud provider.
+
+## AWS
Accessing AWS environments from a CI/CD system often involves assuming temporary credentials using OpenID Connect (OIDC). For platform-specific documentation, see:
+
- [GitHub OIDC Configuration](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services)
- [GitLab OIDC Configuration](https://docs.gitlab.com/ee/ci/cloud_services/aws/)
@@ -10,11 +13,11 @@ Shared credentials can complicate tracking who performed specific actions in AWS
## How it works
-Gruntwork Pipelines creates an audit log that tracks which user performed what action in which AWS account. It does this by setting the [AWS STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) session name to include the initiating username, the Pipelines name, and the merge request/pull request or branch that triggered the action. Logging is handled through [AWS CloudTrail](https://aws.amazon.com/cloudtrail/), where session names appear in the `User name` field, making it easy to identify which user performed an action. For information on locating logs, see [where you can find logs](#where-you-can-find-logs) and [querying data](#querying-data).
+Gruntwork Pipelines creates an audit log that tracks which user performed what action in which AWS account. It does this by setting the [AWS STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) session name to include the initiating username, the Pipelines name, and the merge/pull request or branch that triggered the action. Logging is handled through [AWS CloudTrail](https://aws.amazon.com/cloudtrail/), where session names appear in the `User name` field, making it easy to identify which user performed an action. For information on locating logs, see [where you can find logs](#where-you-can-find-logs) and [querying data](#querying-data).
### What gets logged
-Logs are generated for all operations performed by Gruntwork Pipelines across every AWS account. These logs leverage [AWS STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) session names to clearly label sessions with the username that requested the change and the associated merge request/pull request or branch.
+Logs are generated for all operations performed by Gruntwork Pipelines in AWS across every AWS account. These logs leverage [AWS STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) session names to clearly label sessions with the username that requested the change and the associated merge/pull request or branch.
Each CloudTrail event linked to API calls from Pipelines [Actions](/2.0/docs/pipelines/architecture/actions.md) includes the session name in the `userIdentity` field. For example, if the user `SomeUserInYourOrg` initiated the 123rd request in your repository, the `userIdentity` field in a corresponding CloudTrail event would provide details such as the following.
@@ -58,13 +61,17 @@ By combining this data with a [query service](#querying-data), you can analyze t
Pipelines employs a naming scheme that integrates the user who triggered the Pipelines [Action](/2.0/docs/pipelines/architecture/actions.md) along with the request or branch that initiated the action. The AWS STS session name is formatted as follows:
`<username>-via-GWPipelines@(PR-<request number>|<deploy branch>)`.
-#### For merge request/pull request events
+#### For merge/pull request events
+
When Pipelines runs in response to a request event (opened, updated, or reopened), the session name includes the user who made the most recent commit on the branch and the request number. For instance:
+
- If the user `SomeUserInYourOrg` created request number `123`, the session name would be:
`SomeUserInYourOrg-via-GWPipelines@PR-123`.
#### For merged requests
+
When Pipelines runs after a request is merged, the session name reflects the user who performed the merge and the deploy branch name (e.g., `main`). For example:
+
- If the user `SomeUserInYourOrg` merged a request to the branch `main`, the session name would be:
`SomeUserInYourOrg-via-GWPipelines@main`.
diff --git a/docs/2.0/docs/pipelines/architecture/ci-workflows.md b/docs/2.0/docs/pipelines/architecture/ci-workflows.md
index e941dd3f97..65ca5b4d55 100644
--- a/docs/2.0/docs/pipelines/architecture/ci-workflows.md
+++ b/docs/2.0/docs/pipelines/architecture/ci-workflows.md
@@ -21,6 +21,7 @@ jobs:
include:
- component: gitlab.com/gruntwork-io/pipelines-workflows/pipelines@3
```
+
## Workflow versioning
@@ -42,33 +43,32 @@ If you [fork the Gruntwork Workflows](https://docs.gruntwork.io/2.0/docs/pipelin
The `pipelines-workflows` repository includes the following reusable workflows:
-- `pipelines-drift-detection.yml` - Used for [Pipelines Drift Detection](/2.0/docs/pipelines/concepts/drift-detection) in all repositories with Drift Detection installed.
-- `pipelines-root.yml` - The core Pipelines workflow for the `infrastructure-live-root` repository, providing core plan/apply functionality and account vending.
-- `pipelines-unlock.yml` - Used to manually unlock state files in all repositories.
+- `pipelines-drift-detection.yml` - (Enterprise only) Used for [Pipelines Drift Detection](/2.0/docs/pipelines/concepts/drift-detection) in all repositories with Drift Detection installed.
+- `pipelines-root.yml` - (Account Factory only) The core Pipelines workflow for the `infrastructure-live-root` repository, providing core plan/apply functionality and account vending.
+- `pipelines-unlock.yml` - (AWS only) Used to manually unlock state files in all repositories.
- `pipelines.yml` - The core Pipelines workflow for `infrastructure-live-access-control` and delegated repositories, supporting plan/apply operations.
+If you are using [Gruntwork Account Factory](/2.0/docs/accountfactory/concepts/), the following workflows are typically present:
-In your repositories, the following workflows are typically present:
-
-#### infrastructure-live-root
+### infrastructure-live-root
- `account-factory.yml` - A standalone workflow independent of `pipelines-workflows`.
- `pipelines-drift-detection.yml` (Enterprise only) - Uses the Gruntwork `pipelines-drift-detection.yml` workflow.
- `pipelines-unlock.yml` - Uses the Gruntwork `pipelines-unlock.yml` workflow.
- `pipelines.yml` - Uses `pipelines-root.yml`.
-#### infrastructure-live-access-control
+
+### infrastructure-live-access-control
- `pipelines-drift-detection.yml` (Enterprise only) - Uses the Gruntwork `pipelines-drift-detection.yml` workflow.
-- `pipelines-unlock.yml` - Uses the Gruntwork `pipelines-unlock.yml` workflow.
+- `pipelines-unlock.yml` - Uses the Gruntwork `pipelines-unlock.yml` workflow (AWS only).
- `pipelines.yml` - Uses `pipelines.yml`.
-#### infrastructure-live-delegated ([Vended Delegated Repositories](/2.0/docs/accountfactory/guides/delegated-repositories))
+### infrastructure-live-delegated ([Vended Delegated Repositories](/2.0/docs/accountfactory/guides/delegated-repositories))
- `pipelines-drift-detection.yml` - Uses the Gruntwork `pipelines-drift-detection.yml` workflow.
- `pipelines-unlock.yml` - Uses the Gruntwork `pipelines-unlock.yml` workflow.
- `pipelines.yml` - Uses `pipelines.yml`.
-
diff --git a/docs/2.0/docs/pipelines/architecture/components.md b/docs/2.0/docs/pipelines/architecture/components.md
deleted file mode 100644
index 902c4c2338..0000000000
--- a/docs/2.0/docs/pipelines/architecture/components.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Components Architecture
-
-Pipelines consists of two main components: the orchestrator and the executor. The orchestrator identifies necessary jobs, while the executor performs those tasks and updates AWS resources accordingly.
-
-## Orchestrator
-
-The orchestrator analyzes each infrastructure change in a pull request or git commit, categorizes the type of change (e.g., `AccountsAdded`, `ModuleChanged`, `EnvCommonChanged`), and identifies the appropriate pipelines actions (e.g., `terragrunt plan`, `apply`, or `destroy`) to execute based on the type of change.
-
-## Executor
-
-The executor receives a pipeline action and an infrastructure change as input and executes the specified action on the change. For example, when responding to `AccountsAdded` events on merge, the executor may create a follow-up pull request in the `infrastructure-live-root` repository to include additional IaC code for baselining the newly added accounts.
-
-## Execution flow
-
-Pipelines begins with an event in GitHub/GitLab, such as the creation, update, or reopening of a merge request/pull request, or a push to `main` (e.g., merging a pull request). The orchestrator determines the set of infrastructure changes (`infra-change set`) and selects the appropriate action for each change. For every change in the set, the executor performs the necessary action and logs the results in GitHub/GitLab, attaching them to the merge request/pull request that triggered the workflow.
-
-## Trust boundaries
-
-A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your AWS accounts.
-
-Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your AWS accounts. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control.
-
-Additionally, each AWS IAM role provisioned through DevOps Foundations is configured to trust a single repository (and, for apply roles, a single branch). If a role's permissions become overly broad, consider creating a new role with more granular permissions tailored to the specific use case. Use the `infrastructure-live-access-control` repository to define and manage these roles.
diff --git a/docs/2.0/docs/pipelines/architecture/execution-flow.md b/docs/2.0/docs/pipelines/architecture/execution-flow.md
new file mode 100644
index 0000000000..dbaa9f1326
--- /dev/null
+++ b/docs/2.0/docs/pipelines/architecture/execution-flow.md
@@ -0,0 +1,15 @@
+# Execution flow
+
+Pipelines begins doing work in response to an event in GitHub/GitLab, such as the creation, update, or merging of a pull/merge request, or a push to a deploy branch (e.g., `main` or `master`). Pipelines runs inside the native CI/CD system offered by Source Control Management (SCM) platforms: GitHub Actions for GitHub, GitLab CI/CD pipelines for GitLab.
+
+The flow of this work consists of two main stages: the orchestrator and the executor. The orchestrator identifies and categorizes work into a set of infrastructure changes (`infra-change set`) based on the contents of a pull/merge request or push to the deploy branch, while the executor carries out the resulting actions and updates infrastructure accordingly.
+
+## Orchestrator
+
+The orchestrator analyzes each infrastructure change in a pull request or git commit, categorizes the type of change (e.g., `AccountsAdded`, `ModuleChanged`, `EnvCommonChanged`), and identifies the appropriate pipelines actions (e.g., `terragrunt plan`, `apply`, or `destroy`) to execute based on the type of change.
+
+## Executor
+
+The executor receives as inputs a pipeline action (e.g. `terragrunt plan`) and a specific unit of infrastructure that has been changed (e.g. `/path/to/changed-unit/terragrunt.hcl`) and executes the specified action on the specified unit.
+
+For example, when responding to a `ModuleChanged` event for `/some/unit/terragrunt.hcl` on merge, the executor might execute a `terragrunt apply` on `/some/unit/terragrunt.hcl`. Or, when responding to `AccountsAdded` events on merge, the executor may create a follow-up pull request in the `infrastructure-live-root` repository to include additional IaC code for baselining the newly added accounts.
diff --git a/docs/2.0/docs/pipelines/architecture/index.md b/docs/2.0/docs/pipelines/architecture/index.md
index c810f39522..da3951ad39 100644
--- a/docs/2.0/docs/pipelines/architecture/index.md
+++ b/docs/2.0/docs/pipelines/architecture/index.md
@@ -1,129 +1,218 @@
# Architecture
-Gruntwork Pipelines is designed to provide flexibility, enabling you to utilize the components you need to manage your infrastructure in a way that aligns with your organization's requirements.
+Gruntwork Pipelines, at its core, is a single binary that customers download using their Gruntwork subscription to "do the right thing" when managing Infrastructure as Code with Terragrunt via GitOps workflows. Pipelines runs in customer CI/CD pipelines (not on Gruntwork servers or any other dedicated server you have to maintain) to handle all the complexity of performing IaC CI/CD.
+Outside of the main binary, Pipelines comprises several other components that work in concert. These components are all provided by Gruntwork, and we work with our customers to ensure they are configured correctly to work together.
-Understanding the components and their structure will help you use Pipelines and associated Infrastructure as Code (IaC) effectively.
+## CI/CD pipelines
-## `infrastructure-live-root`
+By design, customers run the binary as part of their CI/CD pipelines (e.g. GitHub Actions, GitLab CI, etc.). As such, Gruntwork provides out-of-the-box CI/CD configurations for supported platforms when customers sign up for Gruntwork Pipelines.
-The `infrastructure-live-root` repository serves as the root of your infrastructure and is provisioned using the [infrastructure-live-root-template](https://github.com/gruntwork-io/infrastructure-live-root-template) template repository.
+We likewise provide CI/CD configurations for [Gruntwork Account Factory](https://docs.gruntwork.io/account-factory/overview).
-This repository is where you manage sensitive resources such as the Landing Zone and Organization services for AWS. Typically, access to this repository is restricted to a small group of trusted users.
+When using Gruntwork Pipelines without Gruntwork Account Factory, customers are responsible for configuring their repositories to use the appropriate CI/CD configuration for that platform (see [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo) for more information). This code is typically fairly minimal, and the majority of the work is done by reusable workflows made available by Gruntwork, and the binary itself.
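+
+As a rough illustration, a repository's workflow file often just delegates to Gruntwork's reusable workflows. A minimal sketch (the workflow path, version tag, and trigger configuration here are assumptions; consult the installation guide for the exact configuration for your platform):
+
+```yml title=".github/workflows/pipelines.yml"
+name: Pipelines
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  GruntworkPipelines:
+    # Delegate the heavy lifting to Gruntwork's reusable workflow
+    uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v3
+    secrets: inherit
+```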
-All other infrastructure managed with Gruntwork software ultimately depends on resources configured in this repository.
+## Cloud resources
-### Workflows
+When Pipelines is used to manage infrastructure resources in cloud environments, it needs to be able to authenticate to the cloud provider. Pipelines supports authentication out of the box for the following cloud providers:
-- **Account Factory:** (GitHub only) Provides an API for interacting with the Gruntwork Account Factory. It uses a [repository dispatch](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#repository_dispatch) to create AWS account requests.
+- [AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws)
+- [Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure)
+- [Custom](/2.0/docs/pipelines/concepts/cloud-auth/custom) (build your own support for your cloud provider)
- This workflow uses a [repository dispatch](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#repository_dispatch) to create a standard AWS account creation request in the repository.
+With the exception of custom authentication, Pipelines authenticates to these cloud providers via OpenID Connect (OIDC).
- The workflow payload is a JSON object, which can be constructed using the sample HTML file included in the repository. This file can be customized for organizational needs, such as adding tagging fields or additional context.
+With OIDC, Pipelines is able to generate temporary credentials for granular authentication to cloud providers, using the context of the pull request or push to the deploy branch.
- :::tip
+For example, when a pull request is opened, Pipelines might determine that the IaC being edited is in an [environment](/2.0/reference/pipelines/configurations-as-code#environment-blocks) configured to authenticate to AWS using OIDC, and generate temporary read-only credentials (as that's all that's required for plans in pull requests) using the role defined for that environment.
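+
+As an illustrative sketch (hypothetical account and role names; see the configuration reference above for the full schema), such an environment might look like:
+
+```hcl
+environment "dev" {
+  filter {
+    paths = ["dev-account/*"]
+  }
+
+  authentication {
+    aws_oidc {
+      account_id         = "123456789012"
+      plan_iam_role_arn  = "arn:aws:iam::123456789012:role/pipelines-plan"
+      apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply"
+    }
+  }
+}
+```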
- This HTML file can be customized as needed to fit the needs of your organization. You may want to add additional fields to the form for tagging purposes, or to provide additional context to the person who will be approving the account vend.
+## SCM Authentication
- You don't even need to use the form at all if you don't want to. An alternative would be to use a service like ServiceNow to populate the requisite fields, then trigger the workflow using the GitHub API.
+In addition to authenticating to cloud providers, Pipelines also needs to be able to authenticate to Source Control Management (SCM) platforms (e.g. GitHub, GitLab) to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself).
- You can learn more about this [here](/2.0/docs/accountfactory/guides/vend-aws-account).
+Pipelines supports authentication to the following SCM platforms:
- :::
+- GitHub
+- GitLab
-- **Pipelines:** Manages infrastructure changes in a GitOps fashion. While the workflow permissions are mostly read-only for proposing changes, they include write access to apply infrastructure changes upon merging.
+How Pipelines authenticates to these SCM platforms differs slightly, owing to the different authentication mechanisms each platform supports and the varying needs of customers.
-:::tip
+### Gruntwork.io GitHub App Authentication
-The `infrastructure-live-root` repository can be renamed during the bootstrap process, but giving it a similar name to `infrastructure-live-root` is recommended for clarity when using Gruntwork documentation. Multiple root repositories can be created if needed, but this increases complexity and operational overhead.
-It also doesn't have to be the only root repository in your organization. You can have multiple root repositories if you have a complex organization structure that requires it. Make sure to evaluate the trade-offs of having multiple root repositories before doing so. It can be a significant source of complexity and operational overhead.
+Customers using GitHub as their SCM platform can install the [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) to authenticate with GitHub. The app provides a more feature-rich and secure experience, and is the recommended method of authentication for customers who can use it.
-:::
+When using the Gruntwork.io GitHub App, users avoid the need to provision machine users or long-lived Personal Access Tokens (PATs) to authenticate with GitHub, whether to download the Pipelines binary or to access other repositories in their CI/CD pipelines. The permissions the Gruntwork.io GitHub App grants when installed are controlled via the [Gruntwork Developer Portal](#gruntwork-developer-portal).
-## `infrastructure-live-access-control`
+### GitHub Machine Users Authentication
-The `infrastructure-live-access-control` repository manages access control for your infrastructure and is provisioned during the bootstrap process in the `infrastructure-live-root` repository. While only necessary for Enterprise customers, it is recommended for all users.
+Customers using GitHub as their SCM platform who are unable (or don't wish) to use the Gruntwork.io GitHub App, as well as customers using a different SCM platform (e.g. GitLab), use GitHub Machine Users to authenticate with GitHub.
-This repository handles IAM roles, policies, and permissions for delegated infrastructure management. It allows application engineers to propose access control changes, while a central platform team reviews and approves them.
+All customers must authenticate with GitHub in some way to satisfy the core requirement of downloading the Pipelines binary using their Gruntwork subscription. Customers that use GitHub as their SCM platform but aren't using the Gruntwork.io GitHub App also use GitHub Machine Users to authenticate with GitHub to download IaC code and reusable GitHub Actions code. Customers using a different SCM platform (e.g. GitLab) only use GitHub Machine Users to authenticate with GitHub to download the Pipelines binary.
-More access can be granted to this repository than the `infrastructure-live-root` repository, but it should still be treated as a sensitive resource. Organizations typically find it useful to have restrictions on who can approve and merge changes to this repository, and allow for users to propose changes in a self-service fashion. This allows for application workload engineers to propose changes to the access control their workflows need, while allowing a central platform team to review and approve those changes instead of having to author the changes themselves.
+### GitLab Machine Users Authentication
-:::info
+Customers using GitLab as their SCM platform use GitLab Machine Users to authenticate with GitLab to download reusable GitLab CI/CD code, and access other IaC repositories.
-Delegated infrastructure management is the practice of allowing developers to manage infrastructure in a self-service fashion.
+## Gruntwork Developer Portal
-This is in contrast to centralized infrastructure management, where a small number of people manage all the infrastructure for the entire organization.
+The Gruntwork Developer Portal (hosted at [app.gruntwork.io](https://app.gruntwork.io)) is a web-based interface that customers use to manage their Gruntwork subscription. This includes the ability to install the Gruntwork.io GitHub App and associate it with a Gruntwork organization, and manage the access that the app has to relevant GitHub resources in customer GitHub organizations.
-Organizations frequently use a combination of both centralized and delegated infrastructure management to balance the need for control and security with the need for agility and speed.
+It is also used to manage the access that GitHub Machine Users have to relevant Gruntwork-owned repositories, made available via customer Gruntwork subscriptions.
-e.g. Centralized management for core infrastructure like AWS accounts, VPCs, OIDC providers and powerful IAM roles, and delegated management for application infrastructure like container images, AWS ECS services, and S3 buckets.
+## Architecture Overview
-The exact balance will depend on your organization's needs.
+The Gruntwork Pipelines architecture varies depending on your Source Control Management (SCM) platform, authentication method, and cloud provider.
-:::
+Below are simplified diagrams for each supported configuration, to provide a high-level overview of the architecture:
-:::tip
-
-The `infrastructure-live-access-control` repository does not have to be named "infrastructure-live-access-control". You can name it whatever you like. It is highly recommended that the repository is named something similar to `infrastructure-live-access-control` to make it clear what it is when reading Gruntwork documentation, however.
-
-It also doesn't have to be the only access control repository in your organization. You can have multiple access control repositories if you have a complex organization structure that requires it. Make sure to evaluate the trade-offs of having multiple access control repositories before doing so. It can be a significant source of complexity and operational overhead. Also note that doing this will sacrifice some of the benefits of having a single source of truth for access control.
-
-:::
-
-### Workflows
-
-- **Pipelines** - This workflow will be used by the `infrastructure-live-access-control` repository to manage access control infrastructure in response to changes in the repository in a GitOps fashion.
-
- While the permissions for this workflow are largely read-only when proposing access control changes, the workflow also has the ability to make changes to relevant access control infrastructure when the changes are merged.
-
-## `infrastructure-catalog`
-
-The `infrastructure-catalog` repository stores OpenTofu/Terraform modules created for internal use. It is optionally provisioned during the bootstrap process of the `infrastructure-live-root` repository.
-
-This repository is optional but recommended for managing reusable infrastructure code. Customers often combine Gruntwork modules with custom modules stored here to extend functionality.
-
-:::tip
-
-While `infrastructure-catalog` can be renamed, keeping a consistent name is recommended for clarity in documentation. Multiple module repositories can be created if necessary, but consider the trade-offs between centralized and decentralized approaches.
-
-It can be advantageous to have one repository for all modules to make it easier to find and share modules across your organization. However, it can also be advantageous to have multiple repositories if you have different teams that need to manage their own modules, or want to have different modules available to different teams within your organization.
-
-:::
-
-### Workflows
-
-- **Tests:** Validates module functionality by provisioning them in real environments, running [Terratests](https://github.com/gruntwork-io/terratest), and tearing them down. This workflow ensures modules work as expected.
-
-
-## `infrastructure-live-delegated`
-
-One of the primary benefits of IaC Foundations is the streamlined delegation of infrastructure management. For the sake of discussion in Gruntwork documentation, we refer to repositories that have been granted delegated permissions to interact with infrastructure as "delegated repositories", or `infrastructure-live-delegated`. Their permissions are granted by the `infrastructure-live-access-control` repository.
-
-These repositories can be created manually by customers for specific purposes. For example, an application repository may need permissions to build and push a container image to AWS ECR, or to deploy an update to an ECS service. These permissions can be delegated by the `infrastructure-live-access-control` repository to a specific repository that needs those permissions.
-
-Enterprise customers can also expect the creation and management of delegated repositories centrally in the `infrastructure-live-root` repository. This is an Enterprise-only feature that allows for the creation of delegated repositories with largely the same degree of infrastructure management as the `infrastructure-live-root` repository itself. This is useful for organizations that want to allow large degrees of infrastructure management (e.g. entire AWS accounts) without having to manually provision and manage the repositories that need those permissions.
-## Entity relationship diagram
+### GitHub with Gruntwork.io App (Recommended)
```mermaid
-erDiagram
- infra-live-root ||--o| infra-live-access-control : "Delegated Access Control"
- infra-live-access-control ||--o{ infra-live-delegated : "Delegated Infrastructure Management"
- infra-live-root ||--o{ infra-live-delegated : "Vended (Enterprise)"
- infra-live-root ||--o| infra-modules : ""
- infra-live-access-control ||--o| infra-modules: ""
- infra-live-delegated }o--o| infra-modules: ""
+graph TB
+ subgraph "Customer GitHub"
+ GH[GitHub Repository]
+ GHA[GitHub Actions]
+ IaC[Infrastructure as Code]
+ end
+
+ subgraph "Gruntwork Services"
+ GDP[Gruntwork Developer Portal app.gruntwork.io]
+ GApp[Gruntwork.io GitHub App]
+ end
+
+ subgraph "Pipelines Runtime"
+ Binary[Gruntwork Pipelines Binary]
+ end
+
+ subgraph "Cloud Provider"
+ OIDC[OpenID Connect]
+ Cloud[Cloud Resources AWS/Azure/Custom]
+ end
+
+ %% Main flow
+ GH --> IaC
+ IaC --> GHA
+ GHA --> Binary
+
+ %% Authentication
+ GDP --> GApp
+ GApp --> GH
+ Binary --> GApp
+
+ %% Cloud access
+ Binary --> OIDC
+ OIDC --> Cloud
+
+ classDef customer fill:#b3e5fc,stroke:#01579b,stroke-width:2px,color:#000000
+ classDef gruntwork fill:#e1bee7,stroke:#4a148c,stroke-width:2px,color:#000000
+ classDef runtime fill:#f8bbd9,stroke:#880e4f,stroke-width:2px,color:#000000
+ classDef cloud fill:#c8e6c9,stroke:#1b5e20,stroke-width:2px,color:#000000
+
+ class GH,GHA,IaC customer
+ class GDP,GApp gruntwork
+ class Binary runtime
+ class OIDC,Cloud cloud
```
-:::note
-
-The term `infrastructure` is abbreviated as `infra` in the diagram for simplicity.
+### GitHub with Machine Users
-:::
-
-The `infrastructure-live-root` repository serves as the central hub for managing all infrastructure.
-
-- Users who choose to delegate access control can use the `infrastructure-live-root` repository to provision the necessary `pipelines-access-control` roles in AWS accounts. These roles allow access control to be managed within the `infrastructure-live-access-control` repository.
+```mermaid
+graph TB
+ subgraph "Customer GitHub"
+ GH[GitHub Repository]
+ GHA[GitHub Actions]
+ IaC[Infrastructure as Code]
+ end
+
+ subgraph "Gruntwork Services"
+ GDP[Gruntwork Developer Portal app.gruntwork.io]
+ GMU[GitHub Machine User Long-lived PAT]
+ end
+
+ subgraph "Pipelines Runtime"
+ Binary[Gruntwork Pipelines Binary]
+ end
+
+ subgraph "Cloud Provider"
+ OIDC[OpenID Connect]
+ Cloud[Cloud Resources AWS/Azure/Custom]
+ end
+
+ %% Main flow
+ GH --> IaC
+ IaC --> GHA
+ GHA --> Binary
+
+ %% Authentication
+ GDP --> GMU
+ GMU --> GH
+ Binary --> GMU
+
+ %% Cloud access
+ Binary --> OIDC
+ OIDC --> Cloud
+
+ classDef customer fill:#b3e5fc,stroke:#01579b,stroke-width:2px,color:#000000
+ classDef gruntwork fill:#e1bee7,stroke:#4a148c,stroke-width:2px,color:#000000
+ classDef runtime fill:#f8bbd9,stroke:#880e4f,stroke-width:2px,color:#000000
+ classDef cloud fill:#c8e6c9,stroke:#1b5e20,stroke-width:2px,color:#000000
+
+ class GH,GHA,IaC customer
+ class GDP,GMU gruntwork
+ class Binary runtime
+ class OIDC,Cloud cloud
+```
-- Users who opt to delegate infrastructure management can use the `infrastructure-live-access-control` repository to provision the required `pipelines-delegated` roles in AWS accounts. These roles enable infrastructure management to be handled through `infrastructure-live-delegated` repositories.
+### GitLab
-- Users who prefer to centralize module management can utilize the `infrastructure-catalog` repository to create and maintain reusable, vetted modules. These modules can be shared across the organization and accessed by any repository as needed.
+```mermaid
+graph TB
+ subgraph "Customer GitLab"
+ GL[GitLab Repository]
+ GLC[GitLab CI]
+ IaC[Infrastructure as Code]
+ end
+
+ subgraph "Gruntwork Services"
+ GDP[Gruntwork Developer Portal app.gruntwork.io]
+ GMU[GitHub Machine User For Pipelines Binary]
+ GLMU[GitLab Machine User Access Token]
+ end
+
+ subgraph "Pipelines Runtime"
+ Binary[Gruntwork Pipelines Binary]
+ end
+
+ subgraph "Cloud Provider"
+ OIDC[OpenID Connect]
+ Cloud[Cloud Resources AWS/Azure/Custom]
+ end
+
+ %% Main flow
+ GL --> IaC
+ IaC --> GLC
+ GLC --> Binary
+
+ %% Authentication - GitLab
+ GDP --> GLMU
+ GLMU --> GL
+ Binary --> GLMU
+
+ %% Authentication - GitHub (for binary download)
+ GDP --> GMU
+ Binary --> GMU
+
+ %% Cloud access
+ Binary --> OIDC
+ OIDC --> Cloud
+
+ classDef customer fill:#b3e5fc,stroke:#01579b,stroke-width:2px,color:#000000
+ classDef gruntwork fill:#e1bee7,stroke:#4a148c,stroke-width:2px,color:#000000
+ classDef runtime fill:#f8bbd9,stroke:#880e4f,stroke-width:2px,color:#000000
+ classDef cloud fill:#c8e6c9,stroke:#1b5e20,stroke-width:2px,color:#000000
+
+ class GL,GLC,IaC customer
+ class GDP,GMU,GLMU gruntwork
+ class Binary runtime
+ class OIDC,Cloud cloud
+```
diff --git a/docs/2.0/docs/pipelines/architecture/security-controls.md b/docs/2.0/docs/pipelines/architecture/security-controls.md
index 8f26b5e596..1c70edec13 100644
--- a/docs/2.0/docs/pipelines/architecture/security-controls.md
+++ b/docs/2.0/docs/pipelines/architecture/security-controls.md
@@ -1,12 +1,10 @@
# Controls
-Gruntwork Pipelines employs a defense-in-depth approach to secure workflows across both GitHub and GitLab platforms. This document outlines the controls Pipelines uses to ensure that only infrastructure written in code and approved by a reviewer can be deployed to your AWS accounts.
+Gruntwork Pipelines employs a defense-in-depth approach to secure workflows across both GitHub and GitLab platforms. This document outlines the controls Pipelines uses to ensure that only infrastructure written in code and approved by a reviewer can be deployed to your cloud environments (e.g. AWS accounts).
## Least privilege principle
-Pipelines adheres to the principle of least privilege, granting only the necessary permissions for infrastructure actions.
-
-By default, the only repository/group required to interact with infrastructure using Pipelines in DevOps Foundations is the `infrastructure-live-root` repository/group. This contains infrastructure code for management, logs, security, and shared accounts. Access should be limited to a small, trusted group responsible for defining critical infrastructure, similar to the role of the `root` user in Unix systems.
+Pipelines adheres to the principle of least privilege, granting only the permissions necessary for each infrastructure action. It uses the context of a pull request or push to the deploy branch to determine which environment(s) to authenticate to, and how to authenticate to them.
## Platform-Specific Access Controls
@@ -16,52 +14,46 @@ import TabItem from "@theme/TabItem"
-- The AWS IAM role assumed via OIDC when pull requests are opened or updated has a trust policy that restricts access to the repository itself and provides read-only permissions
-- The AWS IAM role assumed via OIDC when pull requests are merged into the `main` branch has a trust policy limiting access to the repository's `main` branch and granting write permissions
+- The credentials assumed via OIDC (e.g. STS tokens for AWS IAM roles) when pull requests are opened or updated are intended to have trust policies that restrict access to the repository itself and provide read-only permissions.
+- The credentials assumed via OIDC when pull requests are merged into the deploy branch (e.g. `main`) are intended to have trust policies that limit access to the repository's deploy branch and grant write permissions.
- Branch protection rules can be configured to require reviews and status checks
- GitHub App or machine user authentication options available
-- The AWS IAM role assumed via OIDC when merge requests are opened or updated has a trust policy that restricts access to the group itself and provides read-only permissions
-- The AWS IAM role assumed via OIDC when merge requests are merged into the `main` branch has a trust policy limiting access to the group's `main` branch and granting write permissions
+- The credentials assumed via OIDC (e.g. STS tokens for AWS IAM roles) when merge requests are opened or updated are intended to have trust policies that restrict access to the group itself and provide read-only permissions.
+- The credentials assumed via OIDC when merge requests are merged into the deploy branch (e.g. `main`) are intended to have trust policies that limit access to the group's deploy branch and grant write permissions.
- Protected branches can be configured to require approvals and pipeline success
- Machine user authentication required with group-level access configuration
-## Infrastructure access control
-
-An optional `infrastructure-live-access-control` repository/group can manage access control for infrastructure provisioned in AWS accounts. Using this is a best practice for centralized and auditable access management.
-
-- Access to the `main` branch should be restricted to a small, trusted group managing infrastructure access
-- The same least privilege principles apply: roles assumed for pull/merge requests have read-only permissions, while roles for merged changes have write permissions
-
-Unlike the infrastructure-live-root repository, this repository focuses on managing access control rather than defining infrastructure. You might grant write access to a broader group for managing access while maintaining tight control over the main branch. Encourage collaboration between platform teams and application engineers to review and refine access control continuously.
-
-
-
## Token Strategy
### GitHub App Installation Strategy (Recommended)
-No tokens are required when using the GitHub App.
+
+No long-lived tokens are required when using the GitHub App.
### Machine Users Installation Strategy
-Requires the following tokens be created:
+
+Requires that the following tokens are created:
+
- `PIPELINES_READ_TOKEN`: Classic PAT with read access to required repositories
- `INFRA_ROOT_WRITE_TOKEN`: Fine-grained PAT with read/write access to infrastructure repositories
- `ORG_REPO_ADMIN_TOKEN`: Fine-grained PAT with admin access for repository management
-See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md) for more details.
+See [Setup via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) for more details.
-Requires the following tokens be created:
+
+Requires that the following tokens are created:
+
- `PIPELINES_GITLAB_TOKEN`: A GitLab access token with `api` scope
- `PIPELINES_GITLAB_READ_TOKEN`: A GitLab access token with `read_repository` scope
@@ -72,136 +64,14 @@ Pipelines will also require access to Gruntwork's GitHub repositories, however t
-## AWS credentials
-
-Pipelines requires IAM roles configured with trust policies to use OpenID Connect (OIDC) with your CI/CD platform. This eliminates the need for long-lived AWS credentials stored as secrets.
-
-### OpenID Connect Configuration
-Pipelines provisions an OpenID Connect identity provider in AWS IAM for each account, setting GitHub/GitLab as the provider and restricting the audience to AWS STS and your GitHub/GitLab organization. The Pipelines IAM role's trust policy ensures:
-- Only a single repository in your GitHub/GitLab organization can assume the role for plans.
-- Only a single branch can assume the role for applies/destroys.
-
-For more details, see the [official AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html). Below is an example of a trust policy used by Pipelines.
-
-
-
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Federated": "arn:aws:iam::0123456789012:oidc-provider/token.actions.githubusercontent.com"
- },
- "Action": "sts:AssumeRoleWithWebIdentity",
- "Condition": {
- "StringLike": {
- "token.actions.githubusercontent.com:sub": "repo:acme/infrastructure-live-root:ref:*"
- }
- }
- }
- ]
-}
-```
-
-
-
-
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Federated": "arn:aws:iam::0123456789012:oidc-provider/gitlab.com"
- },
- "Action": "sts:AssumeRoleWithWebIdentity",
- "Condition": {
- "StringLike": {
- "gitlab.com:sub": "project_path:acme/projectprefix*:*"
- }
- }
- }
- ]
-}
-
-
-```
-
-
-
-
-
-Refer to [Configuring OpenId Connect in Amazon Web Services](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) for additional details.
-
-### Roles provisioned by DevOps Foundations
-
-Pipelines automatically provisions specific roles in AWS accounts to support required infrastructure operations. These roles follow the naming pattern `-pipelines-`.
-
-For example:
-- The `root-pipelines-plan` role is used by Pipelines to plan changes in the `infrastructure-live-root` repository.
-
-These roles are designed to operate in a single repository and include a trust policy that only permits GitHub Actions workflows triggered by that repository to assume the role. Each role is provisioned in pairs:
-- `plan` roles, with read-only permissions, are used to execute Terragrunt plans for open pull requests.
-- `apply` roles, with read/write permissions, are used to apply or destroy infrastructure changes for merged pull requests or direct pushes to the deploy branch (commonly `main`).
-
-This separation ensures that controls like [branch protection](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) and [CODEOWNERS files](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) can effectively govern infrastructure changes.
-
-#### `root-pipelines-plan`
-
-A read-only plan role for the `infrastructure-live-root` repository.
-- This role is one of the first created when setting up DevOps Foundations. It is provisioned manually by the customer during the platform setup process.
-- It exists in all accounts and handles tasks necessary for setting up AWS accounts.
-- These roles are highly permissive among read-only roles as they manage foundational AWS account setups.
-
-#### `root-pipelines-apply`
-
-A read/write apply role for the `infrastructure-live-root` repository.
-- Like the plan role, this is one of the initial roles created during setup.
-- It is broadly permissive to support foundational account setups and bootstrapping.
-
-#### `access-control-pipelines-plan`
-
-A read-only plan role for the `infrastructure-live-access-control` repository.
-- These roles are provisioned for new accounts but are not included in core accounts such as `management`, `logs`, `security`, or `shared`.
-- They manage IAM roles and policies for vended accounts, facilitating infrastructure access control.
-
-#### `access-control-pipelines-apply`
-
-A read/write apply role for the `infrastructure-live-access-control` repository.
-- Similar to the plan role, these roles are provisioned for vended accounts but excluded from core accounts.
-- They have permissions to manage IAM roles and policies for the accounts where they are provisioned.
-
-#### `delegated-pipelines-plan`
-
-A read-only plan role for delegated repositories, used by Pipelines Enterprise customers.
-
-- These roles are pre-configured to have minimal permissions, primarily for managing OpenTofu/Terraform state.
-- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary.
-- Users should ensure that only the necessary _read-only_ permissions are granted for the specific delegated repository.
-
-
-:::note
-
-These roles have almost no permissions by default. They are pre-configured by default to only have access to OpenTofu/Terraform state, and the pull requests that are opened to provision them include documentation on how to add additional permissions as appropriate.
+## Cloud Authentication
-It is up to the user provisioning these roles to ensure that this role has only the necessary _read-only_ permissions required to manage infrastructure changes relevant to the delegated repository.
+Pipelines supports multiple authentication methods across cloud providers, and will use the appropriate method based on the context of the pull request or push to the deploy branch.
-:::
+To learn more about how Pipelines authenticates to the cloud, read the [Cloud Authentication](/2.0/docs/pipelines/concepts/cloud-auth/index.md) documentation.
-#### `delegated-pipelines-apply`
+## Trust boundaries
-A read/write apply role for delegated repositories.
-- Similar to the plan role, these roles are pre-configured with minimal permissions and are intended for managing OpenTofu/Terraform state.
-- A pull request will be opened in `infrastructure-live-access-control` during provisioning include documentation for adding additional permissions if necessary.
-- Users must ensure that the role has only the necessary _read/write_ permissions required for the delegated repository.
+A critical aspect of Pipelines' architecture is understanding its trust model. Since Pipelines runs within a CI/CD system, it has privileged access to your infrastructure resources (e.g. AWS accounts, VPCs, EC2 instances, etc.).
-:::note
-The `delegated-pipelines-plan` and `delegated-pipelines-apply` roles are automatically provisioned for new delegated accounts. Enterprise customers will see pull requests created in the `infrastructure-live-access-control` repository to vend these roles with proper configurations.
-:::
+Anyone with the ability to edit code in the `main` branch of your repositories inherently has the authority to make corresponding changes in your infrastructure resources. For this reason, it is important to follow the [Repository Access](/2.0/docs/pipelines/installation/viamachineusers#repository-access) guidelines to ensure appropriate access control.
diff --git a/docs/2.0/docs/pipelines/architecture/usage-data.md b/docs/2.0/docs/pipelines/architecture/usage-data.md
index 7bc78674b6..90971ad0ad 100644
--- a/docs/2.0/docs/pipelines/architecture/usage-data.md
+++ b/docs/2.0/docs/pipelines/architecture/usage-data.md
@@ -1,3 +1,3 @@
# Usage Data
-Gruntwork Pipelines collects usage data to gain insights into how customers interact with the product. This data includes information such as the duration of pipeline runs, the number of jobs executed, the customer name, and any application errors encountered during execution.
\ No newline at end of file
+Gruntwork Pipelines collects usage data to gain insights into how customers interact with the product. This data includes information such as the duration of pipeline runs, the number of jobs executed, the customer name, and any application errors encountered during execution.
diff --git a/docs/2.0/docs/pipelines/concepts/cloud-auth.md b/docs/2.0/docs/pipelines/concepts/cloud-auth/aws.mdx
similarity index 58%
rename from docs/2.0/docs/pipelines/concepts/cloud-auth.md
rename to docs/2.0/docs/pipelines/concepts/cloud-auth/aws.mdx
index c252027722..8a4b014fa7 100644
--- a/docs/2.0/docs/pipelines/concepts/cloud-auth.md
+++ b/docs/2.0/docs/pipelines/concepts/cloud-auth/aws.mdx
@@ -1,18 +1,108 @@
-# Authenticating to the Cloud
+# Authenticating to AWS
-## Authenticating to AWS accounts
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
-Pipelines automatically determines which AWS account to authenticate to based on the infrastructure changes proposed in your pull request.
+Pipelines automatically determines which AWS account(s) to authenticate with, and how to authenticate with them, based on the infrastructure changes proposed in your pull request.
-### How Pipelines authenticates to AWS
+## How Pipelines authenticates to AWS
-To execute the actions detected by Pipelines, each AWS account must assume an AWS IAM Role using Open ID Connect (OIDC). At a high level, OIDC works as follows: AWS recognizes GitHub or GitLab as an "identity provider," trusts GitHub’s or GitLab’s request to assume a temporary IAM Role, and then issues AWS credentials valid for the duration of the GitHub Actions or GitLab CI workflow.
+To execute the infrastructure changes detected by Pipelines, each AWS account must have an AWS IAM Role that Pipelines can assume using Open ID Connect (OIDC).
-When creating a new AWS account, it is necessary to update the AWS OIDC configuration to include an IAM role that GitHub or GitLab can assume. When using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this update is performed automatically during the process of [adding a new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account).
+At a high level, OIDC works as follows: AWS recognizes GitHub or GitLab as an "identity provider," trusts GitHub's or GitLab's request to assume a temporary IAM Role, and then issues AWS credentials valid for the duration of the GitHub Actions or GitLab CI workflow.
-### How Pipelines knows what AWS account to authenticate to
+When creating a new AWS account, it is necessary to update the AWS OIDC configuration to include an IAM role that GitHub or GitLab can assume. When using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this update is performed automatically during the process of [vending a new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account).
-Pipelines assumes that each top-level directory in your `infrastructure-live` repository corresponds to a single AWS account, excluding the directory reserved for [module defaults](/2.0/docs/library/concepts/module-defaults). Each account-mapped directory must have an entry in the `accounts.yml` file. The entry should include a key matching the directory name and key/value pairs for the AWS account ID and the root user email address of the account.
+## How Pipelines knows what AWS principals to authenticate as
+
+
+
+
+For HCL configurations, account mappings are defined using environments specified in HCL configuration files in the `.gruntwork` directory (you are using these if you see `.hcl` files in your `.gruntwork` directory).
+
+Whenever Pipelines attempts to authenticate to AWS for a given unit, it will check to see if the unit matches any of the environments specified in your Pipelines HCL configurations. If any do, it will use the corresponding `authentication` block to determine how to authenticate to AWS.
+
+For example, if you have the following environment configuration:
+
+```hcl title=".gruntwork/environments.hcl"
+environment "my_cool_environment" {
+ filter {
+ paths = ["my-cool-account/*"]
+ }
+
+ authentication {
+ aws_oidc {
+ account_id = "123456789012"
+ plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan"
+ apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply"
+ }
+ }
+}
+```
+
+Pipelines will authenticate to AWS using the account with ID `123456789012` when a unit matches the filter `my-cool-account/*`. It will use the `pipelines-plan` role when pull requests are opened/updated, and the `pipelines-apply` role when pull requests are merged. The `pipelines-plan` role typically only has read permissions, while the `pipelines-apply` role typically has both read and write permissions.
+
+Most customers prefer not to have to explicitly track the account IDs of AWS accounts in their configuration files. Instead, they prefer to leverage the `aws` block to parse an `accounts.yml` file that contains the relevant account metadata, then reference the accounts by name in their environment configurations.
+
+For example, you could create an `accounts.yml` file like the following with your account definitions:
+
+```yml title="accounts.yml"
+"my-cool-account":
+ "email": "my-root-account-email@example.com"
+ "id": "123456789012"
+```
+
+Then, create an `aws.hcl` file that references this `accounts.yml` file using the `aws` block:
+
+```hcl title=".gruntwork/aws.hcl"
+aws {
+ accounts "all" {
+ path = "accounts.yml"
+ }
+}
+```
+
+You can then reference these accounts in your environment configurations:
+
+```hcl title=".gruntwork/environments.hcl"
+environment "my_cool_environment" {
+ filter {
+ paths = ["my-cool-account/*"]
+ }
+
+ authentication {
+ aws_oidc {
+ account_id = aws.accounts.all.my-cool-account.id
+ plan_iam_role_arn = "arn:aws:iam::${aws.accounts.all.my-cool-account.id}:role/pipelines-plan"
+ apply_iam_role_arn = "arn:aws:iam::${aws.accounts.all.my-cool-account.id}:role/pipelines-apply"
+ }
+ }
+}
+```
+
+```bash title="Infrastructure Live"
+.
+├── .gruntwork/
+│ ├── aws.hcl
+│ └── environments.hcl
+├── accounts.yml
+├── my-cool-account
+│ └── us-east-1
+│ └── dev
+│ └── database
+│ └── terragrunt.hcl
+```
+
+:::info
+The HCL configuration approach provides more flexibility for complex authentication scenarios and enables the use of [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/) features.
+:::
+
+
+
+
+For legacy YAML configurations (you are using these if you see a `.gruntwork/config.yml` file in your repository), account mappings are defined using a combination of the `accounts.yml` file at the root of your repository, and the names of top-level directories in your `infrastructure-live` repository.
+
+Pipelines assumes that each top-level directory in your `infrastructure-live` repository corresponds to a single AWS account, excluding the directory reserved for [module defaults](/2.0/docs/library/concepts/module-defaults) (the one named `_envcommon`). Each account-mapped directory must have an entry in the account configuration. The entry should include a key matching the directory name and key/value pairs for the AWS account ID and the root user email address of the account.
For instance, the following `accounts.yml` entry maps to a directory named `my-cool-account` in your `infrastructure-live` repository:
@@ -25,7 +115,7 @@ For instance, the following `accounts.yml` entry maps to a directory named `my-c
```bash title="Infrastructure Live"
.
├── accounts.yml
-├── _module_defaults
+├── _envcommon
│ └── services
│ └── my-app.hcl
├── my-cool-account
@@ -35,7 +125,10 @@ For instance, the following `accounts.yml` entry maps to a directory named `my-c
│ └── terragrunt.hcl
```
-### AWS account authentication when creating new AWS accounts
+
+
+
+## AWS account authentication when creating new AWS accounts
:::note
@@ -52,7 +145,7 @@ Pipelines manages two main types of infrastructure-change events:
For the first type (add/change/delete Terragrunt files), Pipelines authenticates directly to the AWS account containing the affected resources. For the second type (creating new AWS accounts), Pipelines uses the Management Account.
-#### Management account
+### Management account
Gruntwork's Account Factory is built on AWS Control Tower, which requires that new AWS accounts be created through the [Control Tower Management AWS Account](https://docs.aws.amazon.com/controltower/latest/userguide/how-control-tower-works.html#what-is-mgmt).
@@ -66,15 +159,18 @@ The AWS IAM Role in the Management Account must have permissions to provision ne
:::
-#### Child accounts
+### Child accounts
+
+A child account in the context of Gruntwork Account Factory is an AWS account that is created by AWS Control Tower and is managed by Pipelines. It is a "child" account in that it is considered a child of the Management Account: Pipelines performs the initial baselining of the account by first assuming a role in the Management Account (the parent), then using that role to assume a different role in the child account.
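+
+The resulting role chain can be sketched roughly as follows:
+
+```mermaid
+sequenceDiagram
+    participant CI as CI/CD (GitHub Actions/GitLab CI)
+    participant Mgmt as Management Account role
+    participant Child as Child account role
+
+    CI->>Mgmt: Assume role via OIDC
+    Mgmt->>Child: Assume role in the child account
+    Child->>Child: Run terragrunt to baseline the account
+```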
Each child account (e.g., `dev`, `stage`, `prod`, etc.) contains an AWS IAM role that Pipelines can assume from GitHub Actions or GitLab CI using OIDC. This role is automatically provisioned during the [account baseline process](/2.0/docs/accountfactory/guides/vend-aws-account). Once the role is established in the child account, users can submit pull requests/merge requests to add, modify, or delete resources in that account.
When a pull request/merge request is created or synchronized, or when changes are pushed to the `main` branch, Pipelines detects the changes, maps them to the appropriate account, assumes the role in the child account, and executes a `terragrunt plan` (for pull requests/merge requests) or `terragrunt apply` (for pushes to `main`).
-### Fundamentals of OIDC for Publicly Available and Private CI/CD platforms
+## Fundamentals of OIDC for Publicly Available and Private CI/CD platforms
### JWT Token Issuers
+
A JWT token is a base64-encoded JSON object that contains three parts: a header, a payload, and a signature. The header typically contains metadata about the token, such as the algorithm used to sign it. The payload contains the claims or assertions made by the issuer, such as the subject (user), audience (intended recipient), and expiration time. The signature is used to verify that the token was issued by a trusted authority and has not been tampered with.
Critically, the issuer is a URL that is both specified inside the token, and is used by consumers of the token to fetch the public key used to validate the signature of that same token. Assuming the public key is fetched via HTTPS, there is a valid trust chain that the token was in fact issued by the expected issuer and you have typical cryptographic guarantees it wasn't substituted or tampered with.
@@ -83,8 +179,8 @@ Typically the issuer is the hostname of the CI/CD platform, such as `https://git
If, however, your CI/CD platform is hosted privately, you will need to host the public key and OIDC configuration in a publicly accessible location, such as an S3 bucket, and update the issuer in your CI/CD configuration to point to that location. The diagrams below illustrate both approaches - fetching the keys directly from your CI/CD platform via a public route, or fetching the keys from a public S3 bucket.
+### Publicly Available CI/CD Platforms
-#### Publicly Available CI/CD Platforms
```mermaid
sequenceDiagram
participant SCM as SCM (GitLab/GitHub etc.)
@@ -102,7 +198,7 @@ sequenceDiagram
```
-#### Non-Publicly Available CI/CD Platforms
+### Non-Publicly Available CI/CD Platforms
This diagram follows the [recommended approach](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) from GitLab for private CI/CD platform instances. The guidance is to host the public key in a publicly accessible S3 bucket and update the issuer in the CI/CD configuration.
diff --git a/docs/2.0/docs/pipelines/concepts/cloud-auth/azure.md b/docs/2.0/docs/pipelines/concepts/cloud-auth/azure.md
new file mode 100644
index 0000000000..b18f966215
--- /dev/null
+++ b/docs/2.0/docs/pipelines/concepts/cloud-auth/azure.md
@@ -0,0 +1,129 @@
+# Authenticating to Azure
+
+Pipelines automatically determines which Azure subscription(s) to authenticate with, and how to authenticate with them, based on the infrastructure changes proposed in your pull request.
+
+## How Pipelines authenticates to Azure
+
+To execute the actions detected by Pipelines, each Azure subscription must have one or more Entra ID applications configured that Pipelines can authenticate with using Open ID Connect (OIDC).
+
+At a high level, OIDC works as follows: Entra ID recognizes GitHub or GitLab as an "identity provider," trusts GitHub's or GitLab's request to authenticate with a specific Entra ID application, and then issues Azure credentials valid for the duration of the GitHub Actions or GitLab CI workflow.
+
+When creating a new Azure subscription, it is necessary to configure Entra ID applications and federated identity credentials to enable GitHub or GitLab authentication via OIDC.
+
+## How Pipelines knows what Azure principals to authenticate as
+
+Azure federated identity mappings are defined using environments specified in HCL configuration files in the `.gruntwork` directory.
+
+Whenever Pipelines attempts to authenticate to Azure for a given unit, it will check to see if the unit matches any of the environments specified in your Pipelines HCL configurations. If any do, it will use the corresponding `authentication` block to determine how to authenticate to Azure.
+
+For example, if you have the following environment configuration:
+
+```hcl title=".gruntwork/environments.hcl"
+environment "my_azure_subscription" {
+ filter {
+ paths = ["my-azure-subscription/*"]
+ }
+
+ authentication {
+ azure_oidc {
+ tenant_id = "a-tenant-id"
+ subscription_id = "a-subscription-id"
+ plan_client_id = "plan-client-id"
+ apply_client_id = "apply-client-id"
+ }
+ }
+}
+```
+
+Pipelines will authenticate to Azure using the subscription with ID `a-subscription-id` within tenant `a-tenant-id` when the filepath of the unit matches the filter `my-azure-subscription/*`. It will use the `plan-client-id` application when pull requests are opened/updated, and the `apply-client-id` application when pull requests are merged. The plan application typically only has read permissions, while the apply application typically has both read and write permissions.
+
+```bash title="Infrastructure Live"
+.
+├── .gruntwork/
+│ └── environments.hcl
+├── my-azure-subscription
+│ └── my-azure-resource-group
+│ └── database
+│ └── terragrunt.hcl
+```
+
+:::info
+The HCL configuration approach provides flexibility for complex authentication scenarios and enables the use of [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/) features.
+:::
+
+## Azure subscription authentication workflow
+
+Pipelines manages infrastructure changes by authenticating directly to the Azure subscription containing the affected resources using OIDC.
+
+When a pull request is created or synchronized, or when changes are pushed to the `main` branch, Pipelines detects the changes, maps them to the appropriate Azure subscription, authenticates using the configured Entra ID application, and executes a Terragrunt plan (for pull requests) or apply (for pushes to `main`).
+
+## Fundamentals of OIDC for Azure with GitHub Actions and GitLab CI
+
+### Entra ID Federated Identity Credentials
+
+Azure uses federated identity credentials to establish trust between external identity providers (like GitHub or GitLab) and Entra ID applications. This eliminates the need to store long-lived secrets in your CI/CD platform.
+
+The federated identity credential configuration includes:
+
+- **Issuer**: The identity provider URL (e.g., `https://token.actions.githubusercontent.com` for GitHub Actions)
+- **Subject identifier**: Specifies which repository, branch, or other criteria must match for the token to be accepted (e.g., `repo:my-org/my-repo:ref:refs/heads/main`)
+- **Audience**: The intended recipient of the token (typically `api://AzureADTokenExchange`)
+
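+For example, a federated identity credential for a GitHub repository's `main` branch might be created with the Azure CLI like so (a sketch with placeholder application and repository names):
+
+```bash
+az ad app federated-credential create \
+  --id <application-object-id> \
+  --parameters '{
+    "name": "github-main-branch",
+    "issuer": "https://token.actions.githubusercontent.com",
+    "subject": "repo:my-org/my-repo:ref:refs/heads/main",
+    "audiences": ["api://AzureADTokenExchange"]
+  }'
+```
+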
+### Publicly Available CI/CD Platforms
+
+```mermaid
+sequenceDiagram
+ participant SCM as SCM (GitLab/GitHub etc.)
+ participant SCMPublicRoute as SCM Hostname e.g. github.com
+ participant EntraID as Entra ID
+
+ SCM->>SCM: Generate a public/private key pair
+ SCM->>SCM: Generate a JWT and sign with the private key
+ SCM->>EntraID: Send JWT to Entra ID requesting an access token
+ EntraID->>SCMPublicRoute: Fetch public key via HTTPS (which validates that the SCM is who it says it is)
+ SCMPublicRoute->>EntraID: Return the public key
+ EntraID->>EntraID: Validate signature on JWT using public key to validate that it was generated by the Issuer
+ EntraID->>EntraID: Inspect JWT Content and ensure it passes federated identity credential policies
+ EntraID->>SCM: Return access token for the configured application
+```
+
+### Non-Publicly Available CI/CD Platforms
+
+For private CI/CD platform instances, you have a few options to enable OIDC with Azure:
+
+1. **Host OIDC configuration publicly**: Similar to the AWS approach, you can host the OIDC configuration (`.well-known/openid-configuration`) and JWKS (JSON Web Key Set) in a publicly accessible location, such as an Azure Storage Account with static website hosting, and update the issuer in your CI/CD configuration.
+
+2. **Configure firewall exceptions**: Update your application firewalls to specifically allow requests to the `.well-known/openid-configuration` endpoint and the JWKS endpoint from Entra ID.
+
+The diagram below illustrates the first approach - hosting the public key and OIDC configuration in a publicly accessible Azure Storage Account:
+
+```mermaid
+sequenceDiagram
+ participant SCM as SCM (GitLab/GitHub etc.)
+ participant SCMPublicRoute as Public Azure Storage (e.g. acme-public.z6.web.core.windows.net)
+ participant EntraID as Entra ID
+
+ SCM->>SCM: Generate a public/private key pair
+ SCM->>SCMPublicRoute: Publish public key and OIDC config to Azure Storage
+ SCM->>EntraID: Update federated identity credential issuer to Azure Storage public URL
+ SCM->>SCM: Update issuer to hostname of Azure Storage public URL
+ SCM->>SCM: Generate a JWT with updated issuer and sign with the private key
+ SCM->>EntraID: Send JWT to Entra ID requesting an access token
+ EntraID->>SCMPublicRoute: Fetch public key via HTTPS (HTTPS is important as it validates that the host is in fact the issuer)
+ SCMPublicRoute->>EntraID: Return the public key
+ EntraID->>EntraID: Validate signature on JWT using public key to validate that it was generated by the Issuer
+ EntraID->>EntraID: Inspect JWT Content and ensure it passes federated identity credential policies
+ EntraID->>SCM: Return access token for the configured application
+```
+
+### Environment Variables for Azure Authentication
+
+When Pipelines authenticates to Azure using OIDC, it provides the following environment variables to Terragrunt (and therefore OpenTofu/Terraform):
+
+- `ARM_CLIENT_ID`: The client ID of the Entra ID application
+- `ARM_TENANT_ID`: The Entra ID tenant ID
+- `ARM_SUBSCRIPTION_ID`: The Azure subscription ID
+- `ARM_OIDC_TOKEN`: The OIDC token provided by the CI/CD platform
+- `ARM_USE_OIDC`: Set to `true` to enable OIDC authentication
+
+The Azure provider (azurerm) uses these environment variables to authenticate directly with Entra ID using the OIDC token.
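+
+In practice, this means the provider configuration in your modules can stay free of hardcoded credentials. A minimal sketch:
+
+```hcl
+provider "azurerm" {
+  features {}
+  # Client ID, tenant ID, subscription ID, and the OIDC token are picked up
+  # from the ARM_* environment variables that Pipelines sets.
+}
+```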
diff --git a/docs/2.0/docs/pipelines/concepts/cloud-auth/custom.md b/docs/2.0/docs/pipelines/concepts/cloud-auth/custom.md
new file mode 100644
index 0000000000..9c84a1d413
--- /dev/null
+++ b/docs/2.0/docs/pipelines/concepts/cloud-auth/custom.md
@@ -0,0 +1,194 @@
+# Custom Authentication
+
+Pipelines supports custom authentication mechanisms through the `custom` authentication block, allowing you to integrate with any cloud provider or service that Terragrunt needs to interact with.
+
+:::warning
+
+This is a more advanced feature, and is not recommended for most users if they have a viable alternative listed in [Authenticating to the Cloud](/2.0/docs/pipelines/concepts/cloud-auth/index.md).
+
+Using custom authentication provides more flexibility in how authentication is handled, but it also requires more responsibility from users to ensure that the authentication being used is secure and does not expose sensitive information.
+
+If you are not sure if custom authentication is right for you, please contact Gruntwork support.
+
+:::
+
+## How custom authentication works
+
+Pipelines supports custom authentication via Terragrunt's [auth provider command](https://terragrunt.gruntwork.io/docs/features/authentication/#auth-provider-command) feature. When you configure a `custom` authentication block, Pipelines will execute the command specified in the `auth_provider_cmd` attribute, and pass the output of that command to Terragrunt (with the output expected to match the schema documented in Terragrunt's documentation for the auth provider command feature).
+
+Your authentication provider command should output a JSON response to stdout that follows the schema expected by Terragrunt:
+
+```json
+{
+ "awsCredentials": {
+ "ACCESS_KEY_ID": "",
+ "SECRET_ACCESS_KEY": "",
+ "SESSION_TOKEN": ""
+ },
+ "awsRole": {
+ "roleARN": "",
+ "sessionName": "",
+ "duration": 0,
+ "webIdentityToken": ""
+ },
+ "envs": {
+ "ANY_KEY": "any_value"
+ }
+}
+```
+
+All top-level objects are optional, and you can provide more than one. The `envs` object is particularly useful for setting cloud provider-specific environment variables.
+
+## Configuring custom authentication
+
+Custom authentication is defined using environments specified in HCL configuration files in the `.gruntwork` directory at the root of your repository.
+
+### Basic configuration
+
+```hcl title=".gruntwork/environments.hcl"
+environment "my_custom_environment" {
+ filter {
+ paths = ["my-custom-provider/*"]
+ }
+
+ authentication {
+ custom {
+ auth_provider_cmd = "./scripts/auth-provider.sh"
+ }
+ }
+}
+```
+
+### Path resolution behavior
+
+The `auth_provider_cmd` attribute supports flexible path resolution:
+
+#### 1. Relative paths (recommended)
+
+When you specify a relative path, Pipelines first looks for the file relative to your `.gruntwork` directory at the root of your repository:
+
+```hcl
+custom {
+ auth_provider_cmd = "./scripts/auth-provider.sh"
+}
+```
+
+Expected location: `.gruntwork/scripts/auth-provider.sh`
+
+Paths that start with `..` can also be used to step outside the `.gruntwork` directory. If the file is not found relative to the `.gruntwork` directory, Pipelines uses the path as-is, allowing Terragrunt to resolve it relative to the unit directory.
+
+#### 2. Absolute paths
+
+Absolute paths are used exactly as specified:
+
+```hcl
+custom {
+ auth_provider_cmd = "/usr/local/bin/auth-provider"
+}
+```
+
+#### 3. Commands in PATH
+
+You can also reference commands available in your system's PATH:
+
+```hcl
+custom {
+ auth_provider_cmd = "my-custom-auth-provider --method=gcp --project=my-project"
+}
+```
+
+### Example authentication scripts
+
+#### Google Cloud Platform
+
+```bash title=".gruntwork/scripts/gcp-auth.sh"
+#!/bin/bash
+set -e
+
+# Authenticate using gcloud and get access token
+ACCESS_TOKEN=$(gcloud auth print-access-token)
+
+# Output credentials in the format expected by Terragrunt
+# The Google provider reads GOOGLE_OAUTH_ACCESS_TOKEN from the environment at runtime
+cat <<EOF
+{
+  "envs": {
+    "GOOGLE_OAUTH_ACCESS_TOKEN": "$ACCESS_TOKEN"
+  }
+}
+EOF
+```
+
-
+## OpenTofu & Terraform
-## Terraform & OpenTofu
-
-You can specify whether to invoke Terraform or OpenTofu in your Pipeline by configuring the [tf-binary](/2.0/reference/pipelines/configurations#tf-binary) setting. Define the versions of `tf-binary` and Terragrunt in the [mise.toml](/2.0/reference/pipelines/configurations#example-mise-configuration) file within your repository.
+You can specify whether to invoke OpenTofu or Terraform with Pipelines by configuring the [tf-binary](/2.0/reference/pipelines/configurations#tf-binary) setting. Define the versions of Terragrunt and OpenTofu/Terraform used by Pipelines in the [mise.toml](/2.0/reference/pipelines/configurations#example-mise-configuration) file within your repository.
diff --git a/docs/2.0/docs/pipelines/guides/handling-broken-iac.md b/docs/2.0/docs/pipelines/guides/handling-broken-iac.md
index 53b426f5b0..506cd8fedf 100644
--- a/docs/2.0/docs/pipelines/guides/handling-broken-iac.md
+++ b/docs/2.0/docs/pipelines/guides/handling-broken-iac.md
@@ -1,6 +1,6 @@
# Handling Broken Infrastructure as Code
-When working with Infrastructure as Code (IaC) at scale, you may occasionally encounter broken or invalid configuration files that prevent Terragrunt from successfully running operations. These issues can block entire CI/CD pipeline, preventing even valid infrastructure changes from being deployed.
+When working with Infrastructure as Code (IaC) at scale, you may occasionally encounter broken or invalid configuration files that prevent Terragrunt from successfully running operations. These issues can block the entire CI/CD pipeline, preventing even valid infrastructure changes from being deployed.
This guide presents several strategies for handling broken IaC while keeping your pipelines operational.
@@ -16,13 +16,13 @@ Common causes of broken IaC include:
- Temporary or experimental code
- Resources or modules that are works in progress
-Depending on the type of run pipeline is executing, broken IaC can fail a pipeline and prevent other, legitimate changes from being deployed. Especially in circumstances where pipelines will trigger a `terragrunt run-all` it is important that all IaC is valid or properly excluded.
+Depending on the type of run Pipelines is executing, broken IaC can fail a pipeline and prevent other, legitimate changes from being deployed. Especially in circumstances where Pipelines triggers a `terragrunt run --all`, it is important that all IaC is valid or properly excluded.
## Resolution Strategies
Here are several approaches to manage broken IaC, presented in order of preference:
-### 1. Fix the Invalid Code (Preferred Solution)
+### Fix the Invalid Code (Preferred Solution)
The ideal solution is to fix the underlying issues:
@@ -41,7 +41,7 @@ git push
Then create a merge/pull request to apply the fix to your main branch.
-### 2. Remove the Invalid IaC
+### Remove the Invalid IaC
If you can't fix the issue immediately but the infrastructure is no longer needed, you can remove the problematic code:
@@ -55,22 +55,22 @@ git commit -m "Remove deprecated infrastructure module"
git push
```
-### 3. Use a `.terragrunt-excludes` File
+### Use a `.terragrunt-excludes` File
If you wish to keep the broken code as is and simply have it ignored by pipelines and Terragrunt, you can use a `.terragrunt-excludes` file to skip problematic units:
-1. Create a `.terragrunt-excludes` file in the root of your repository:
+Create a `.terragrunt-excludes` file in the root of your repository:
-```
+```text
# .terragrunt-excludes
# One directory per line (no globs)
account/region/broken-module1
account/region/broken-module2
```
-2. Commit this file to your repository, and Terragrunt will automatically exclude these directories when using `run-all`. Note, if you make a change to the code in those units and pipelines triggers a `run` in that directory itself, then the exclude will not be applied.
+Commit this file to your repository, and Terragrunt will automatically exclude these directories when using `run --all`. Note that if you change the code in those units and Pipelines triggers a `run` in that directory itself, the exclusion will not be applied.
-### 4. Configure Exclusions with Pipelines Environment Variables
+### Configure Exclusions with Pipelines Environment Variables
If you don't wish to use `.terragrunt-excludes` in the root of the repository, you can create another file in a different location and set the `TG_QUEUE_EXCLUDES_FILE` environment variable to that path. You then use the Pipelines [`env` block](/2.0/reference/pipelines/configurations-as-code/api#env-block) in your `.gruntwork/pipelines.hcl` configuration to set environment variables that control Terragrunt's behavior:
@@ -94,14 +94,14 @@ repository {
When excluding modules, be aware of dependencies:
1. If module B depends on module A, and module A is excluded, you may need to exclude module B as well.
-2. Use `terragrunt graph-dependencies` to visualize your dependency tree.
+2. Use `terragrunt dag graph` to visualize your dependency tree.
## Best Practices
1. **Document exclusions**: Add comments to your `.terragrunt-excludes` file explaining why each directory is excluded.
2. **Track in issue system**: Create tickets for excluded modules that need to be fixed, including any relevant dates/timelines for when they should be revisited.
3. **Regular cleanup**: Periodically review and update your excluded directories.
-4. **Validate locally**: Run `terragrunt hcl-validate` or `terragrunt validate` locally before committing changes.
+4. **Validate locally**: Run `terragrunt hcl validate` or `terragrunt validate` locally before committing changes.
## Troubleshooting
@@ -112,4 +112,4 @@ If you're still experiencing issues after excluding directories:
- Review pipeline logs to confirm exclusions are being applied
- Verify you don't have conflicting environment variable settings
-By implementing these strategies, you can keep your infrastructure pipelines running smoothly while addressing underlying issues in your codebase.
\ No newline at end of file
+By implementing these strategies, you can keep your infrastructure pipelines running smoothly while addressing underlying issues in your codebase.
diff --git a/docs/2.0/docs/pipelines/guides/installing-drift-detection.md b/docs/2.0/docs/pipelines/guides/installing-drift-detection.md
index 50beaf6bd8..ed46c2c976 100644
--- a/docs/2.0/docs/pipelines/guides/installing-drift-detection.md
+++ b/docs/2.0/docs/pipelines/guides/installing-drift-detection.md
@@ -1,23 +1,43 @@
# Installing Drift Detection
import PersistentCheckbox from '/src/components/PersistentCheckbox';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
:::note
-Pipelines Drift Detection is exclusively available to DevOps Foundations Enterprise customers on GitHub. GitLab support is coming soon.
+Pipelines Drift Detection is exclusively available to DevOps Foundations Enterprise customers on GitHub and GitLab.
:::
For new pipelines repositories using the latest version of Pipelines, Drift Detection is installed automatically and requires no additional action.
To upgrade an existing repository and enable Drift Detection, follow these steps:
-### Step 1 - Ensure the GitHub App is installed
+### Step 1 - Ensure authentication is properly configured
+
+
+
Verify that the [GitHub App](/2.0/docs/pipelines/installation/viagithubapp) is installed and in use for this repository. Drift Detection relies on permissions granted by the GitHub App and is not compatible with machine user tokens.
+
+
+
+Verify that your GitLab project has the necessary [Machine User tokens](/2.0/docs/pipelines/installation/viamachineusers) configured. Drift Detection requires:
+- `PIPELINES_GITLAB_TOKEN`: A GitLab access token with `api` scope
+- `PIPELINES_GITLAB_READ_TOKEN`: A GitLab access token with `read_repository` scope
+
+
+
+
+
+
### Step 2 - Set up the workflow file
+
+
+
Create a new workflow file in your repository at `.github/workflows/pipelines-drift-detection.yml`.
This directory is the same location as your other Pipelines workflows.
@@ -51,10 +71,49 @@ jobs:
branch-name: ${{ inputs.branch-name }}
```
-Commit the changes to the repository. If [branch protection](/2.0/docs/pipelines/installation/branch-protection) is enabled—which is strongly recommended—you must create a new pull request to incorporate the workflow into your repository.
-
+
+
+
+Drift Detection for GitLab is implemented in the `pipelines-workflows` GitLab CI/CD Component. Add the following configuration to your `.gitlab-ci.yml` file:
+
+```yml
+spec:
+ inputs:
+ pipelines_workflow:
+ options: ["infrachanges", "drift-detection"]
+ description: "Select the pipeline workflow to use"
+ default: "infrachanges"
+ pipelines_drift_detection_filter:
+ type: string
+ description: "Filter for drift detection"
+ default: ""
+ pipelines_drift_detection_branch:
+ type: string
+ description: "Branch name for drift detection"
+ default: "drift-detection"
+---
+workflow:
+ name: GruntworkPipelines
+ rules:
+ - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
+ - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+include:
+ # Reference the latest version of the pipelines workflow, replace this path if you are
+ # using a fork of the pipelines-workflows repository
+ - component: $CI_SERVER_FQDN/gruntwork-io/pipelines-workflows/pipelines@v2
+ inputs:
+ pipelines_workflow: $[[ inputs.pipelines_workflow ]]
+ pipelines_drift_detection_filter: $[[ inputs.pipelines_drift_detection_filter ]]
+ pipelines_drift_detection_branch: $[[ inputs.pipelines_drift_detection_branch ]]
+```
+
+
+
+
+
+
### Step 3 - Run your first drift detection job
Follow the instructions at [Running Drift Detection](/2.0/docs/pipelines/guides/running-drift-detection) to start using the new workflow.
diff --git a/docs/2.0/docs/pipelines/guides/managing-secrets.md b/docs/2.0/docs/pipelines/guides/managing-secrets.mdx
similarity index 58%
rename from docs/2.0/docs/pipelines/guides/managing-secrets.md
rename to docs/2.0/docs/pipelines/guides/managing-secrets.mdx
index f639f1fc8c..96e8b42bfd 100644
--- a/docs/2.0/docs/pipelines/guides/managing-secrets.md
+++ b/docs/2.0/docs/pipelines/guides/managing-secrets.mdx
@@ -20,12 +20,19 @@ To interact with the GitLab API, Pipelines requires a Machine User with a [Perso
-## Authenticating with AWS
+## Authenticating with Cloud Providers
-Pipelines requires authentication with AWS but avoids long-lived credentials by utilizing [OIDC](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services). OIDC establishes an authenticated relationship between a specific Git reference in a repository and a corresponding AWS role, enabling Pipelines to assume the role based on where the pipeline is executed.
+Pipelines requires authentication with your cloud provider but avoids long-lived credentials by utilizing OIDC (OpenID Connect). OIDC establishes an authenticated relationship between a specific Git reference in a repository and a corresponding cloud provider identity, enabling Pipelines to assume the identity based on where the pipeline is executed.
-The role assumption process operates as follows:
+
+
+
+{/* We use an h3 here instead of a markdown heading to avoid breaking the ToC */}
+
+<h3>Authenticating with AWS</h3>
+
+Pipelines uses [OIDC to authenticate with AWS](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services), allowing it to assume an AWS IAM role without long-lived credentials.
+The role assumption process operates as follows:
@@ -41,7 +48,7 @@ sequenceDiagram
AWS STS->>GitHub Actions: Temporary AWS Credentials
```
-For more details, see [GitHub's OIDC documentation](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services).
+For more details, see [GitHub's OIDC documentation for AWS](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services).
@@ -57,25 +64,77 @@ sequenceDiagram
AWS STS->>GitLab CI/CD: Temporary AWS Credentials
```
-For more details, see [GitLab's OIDC documentation](https://docs.gitlab.com/ee/ci/cloud_services/aws/).
+For more details, see [GitLab's OIDC documentation for AWS](https://docs.gitlab.com/ee/ci/cloud_services/aws/).
As a result, Pipelines avoids storing long-lived AWS credentials and instead relies on ephemeral credentials generated by AWS STS. These credentials grant least-privilege access to the resources needed for the specific operation being performed (e.g., read access during a pull/merge request open event or write access during a merge).
+
+
+
+{/* We use an h3 here instead of a markdown heading to avoid breaking the ToC */}
+
+<h3>Authenticating with Azure</h3>
+
+Pipelines uses [OIDC to authenticate with Azure](https://learn.microsoft.com/en-us/entra/architecture/auth-oidc), allowing it to obtain access tokens from Entra ID without long-lived credentials.
+
+The authentication process operates as follows:
+
+
+
+
+```mermaid
+sequenceDiagram
+ participant GitHub Actions
+ participant token.actions.githubusercontent.com
+ participant Entra ID
+ GitHub Actions->>token.actions.githubusercontent.com: OpenID Connect Request
+ token.actions.githubusercontent.com->>GitHub Actions: GitHub JWT
+ GitHub Actions->>Entra ID: Request Access Token (Authorization: GitHub JWT)
+ Entra ID->>GitHub Actions: Azure Access Token
+```
+
+For more details, see [GitHub's OIDC documentation for Azure](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-azure).
+
+
+
+
+```mermaid
+sequenceDiagram
+ participant GitLab CI/CD
+ participant gitlab.com
+ participant Entra ID
+ GitLab CI/CD->>gitlab.com: OIDC ID Token Request with preconfigured audience
+ gitlab.com->>GitLab CI/CD: GitLab JWT
+ GitLab CI/CD->>Entra ID: Request Access Token (Authorization: GitLab JWT)
+ Entra ID->>GitLab CI/CD: Azure Access Token
+```
+
+For more details, see [GitLab's documentation on Azure integration](https://docs.gitlab.com/ee/ci/cloud_services/).
+
+
+
+
+As a result, Pipelines avoids storing long-lived Azure credentials and instead relies on ephemeral access tokens generated by Entra ID. These tokens grant least-privilege access to the resources needed for the specific operation being performed.
+
+
+
+
## Other providers
-If you are managing configurations for additional services using Infrastructure as Code (IaC) tools like Terragrunt, you may need to configure a provider for those services in Pipelines. In such cases, you must supply the necessary credentials for authenticating with the provider. Whenever possible, follow the same principles applied to AWS: use ephemeral credentials, grant only the minimum permissions required, and avoid storing long-lived credentials on disk.
+If you are managing configurations for additional services using Infrastructure as Code (IaC) tools like Terragrunt, you may need to configure a provider for those services in Pipelines. In such cases, you must supply the necessary credentials for authenticating with the provider. Whenever possible, follow the same principles: use ephemeral credentials, grant only the minimum permissions required, and avoid storing long-lived credentials on disk.
### Configuring providers in Terragrunt
For example, consider configuring the [Cloudflare Terraform provider](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs). This provider supports multiple authentication methods to enable secure API calls to Cloudflare services. To authenticate with Cloudflare and manage the associated credentials securely, you need to configure your `terragrunt.hcl` file appropriately.
-First, examine the default AWS authentication provider setup in the root `terragrunt.hcl` file:
+First, examine the default cloud provider authentication setup in the root `root.hcl` file from Gruntwork provided Boilerplate templates:
+
+
-```hcl
+```hcl title="root.hcl"
generate "provider" {
path = "provider.tf"
if_exists = "overwrite_terragrunt"
@@ -93,9 +152,29 @@ EOF
}
```
-This provider block is dynamically generated during the execution of any `terragrunt` command and supplies the AWS provider with the required configuration to discover credentials made available by the pipelines.
+This provider block (the value of `contents`) is dynamically generated as the file `provider.tf` during the execution of any `terragrunt` command and supplies the OpenTofu/Terraform AWS provider with the required configuration to discover credentials made available by the pipelines.
-With this approach, no secrets are written to disk. Instead, the AWS provider dynamically retrieves secrets at runtime.
+
+
+
+```hcl
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+
+With this approach, no secrets are written to disk. Instead, the cloud provider dynamically retrieves secrets at runtime.
According to the Cloudflare documentation, the Cloudflare provider supports several authentication methods. One option involves using the [api_token](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs#api_key) field in the `provider` block, as illustrated in the documentation:
@@ -126,25 +205,42 @@ In this context, `fetch-cloudflare-api-token.sh` is a script designed to retriev
You are free to use any method to fetch the secret, provided it outputs the value to stdout.
-Here are two straightforward examples of how you might fetch the secret:
+Here are straightforward examples of how you might fetch the secret based on your cloud provider:
+
+
+
+
+Using AWS Secrets Manager:
+
+```bash
+aws secretsmanager get-secret-value --secret-id cloudflare-api-token --query SecretString --output text
+```
+
+Using AWS SSM Parameter Store:
+
+```bash
+aws ssm get-parameter --name cloudflare-api-token --query Parameter.Value --output text --with-decryption
+```
+
+Given that Pipelines is already authenticated with AWS for interacting with state, this setup provides a convenient method for retrieving secrets.
-1. Using `aws secretsmanager`:
+
+
- ```bash
- aws secretsmanager get-secret-value --secret-id cloudflare-api-token --query SecretString --output text
- ```
+Using Azure Key Vault:
-2. Using `aws ssm`:
+```bash
+az keyvault secret show --vault-name "$KEY_VAULT_NAME" --name cloudflare-api-token --query value --output tsv
+```
- ```bash
- aws ssm get-parameter --name cloudflare-api-token --query Parameter.Value --output text --with-decryption
- ```
+Given that Pipelines is already authenticated with Azure for interacting with state, this setup provides a convenient method for retrieving secrets.
-Given that Pipelines is already authenticated with AWS for interacting with state, this setup provides a convenient method for retrieving the Cloudflare API token.
+
+
:::
-Alternatively, note that the `api_token` field is optional. Similar to the AWS provider, you can use the `CLOUDFLARE_API_TOKEN` environment variable to supply the API token to the provider at runtime.
+Alternatively, note that the `api_token` field is optional. Similar to cloud provider authentication, you can use the `CLOUDFLARE_API_TOKEN` environment variable to supply the API token to the provider at runtime.
To achieve this, you can update the `provider` block as follows:
@@ -172,6 +268,7 @@ terraform {
}
}
```
+
### Managing secrets
When configuring providers and Pipelines, it's important to store secrets in a secure and accessible location. Several options are available for managing secrets, each with its advantages and trade-offs.
@@ -211,33 +308,62 @@ GitLab CI/CD Variables provide a native way to store secrets for your pipelines.
-#### AWS Secrets Manager
+#### Cloud Provider Secret Stores
+
+Cloud providers offer dedicated secret management services with advanced features and security controls.
+
+
+
+
+**AWS Secrets Manager**
AWS Secrets Manager offers a sophisticated solution for managing secrets. It allows for provisioning secrets in AWS and configuring fine-grained access controls through AWS IAM. It also supports advanced features like secret rotation and access auditing.
**Advantages**:
-- Granular access permissions, ensuring secrets are only accessible when required.
-- Support for automated secret rotation and detailed access auditing.
+- Granular access permissions, ensuring secrets are only accessible when required
+- Support for automated secret rotation and detailed access auditing
**Trade-offs**:
-- Increased complexity in setup and management.
-- Potentially higher costs associated with its use.
+- Increased complexity in setup and management
+- Potentially higher costs associated with its use
Refer to the [AWS Secrets Manager documentation](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) for further details.
-#### AWS SSM Parameter Store
+**AWS SSM Parameter Store**
AWS SSM Parameter Store is a simpler and more cost-effective alternative to Secrets Manager. It supports secret storage and access control through AWS IAM, providing a basic solution for managing sensitive data.
**Advantages**:
-- Lower cost compared to Secrets Manager.
-- Granular access control similar to Secrets Manager.
+- Lower cost compared to Secrets Manager
+- Granular access control similar to Secrets Manager
**Trade-offs**:
-- Limited functionality compared to Secrets Manager, such as less robust secret rotation capabilities.
+- Limited functionality compared to Secrets Manager, such as less robust secret rotation capabilities
Refer to the [AWS SSM Parameter Store documentation](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) for additional information.
+
+
+
+**Azure Key Vault**
+
+Azure Key Vault provides a comprehensive solution for managing secrets, keys, and certificates. It offers fine-grained access controls through Azure RBAC and supports advanced features like secret versioning and access auditing.
+
+**Advantages**:
+- Granular access permissions with Azure RBAC and access policies
+- Support for secret versioning, soft-delete, and purge protection
+- Integration with Azure Monitor for detailed audit logs
+- Hardware Security Module (HSM) backed options for enhanced security
+
+**Trade-offs**:
+- Additional setup complexity for RBAC and access policies
+- Costs associated with transactions and HSM-backed vaults
+
+Refer to the [Azure Key Vault documentation](https://learn.microsoft.com/en-us/azure/key-vault/general/overview) for further details.
+
+
+
+
#### Deciding on a secret store
When selecting a secret store, consider the following key factors:
diff --git a/docs/2.0/docs/pipelines/guides/running-drift-detection.md b/docs/2.0/docs/pipelines/guides/running-drift-detection.md
index d1b1300c38..368ad3877a 100644
--- a/docs/2.0/docs/pipelines/guides/running-drift-detection.md
+++ b/docs/2.0/docs/pipelines/guides/running-drift-detection.md
@@ -1,5 +1,8 @@
# Running Drift Detection
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
## Detecting Drift
Pipelines Drift Detection can be executed manually or on a scheduled basis.
@@ -8,16 +11,52 @@ Pipelines Drift Detection can be executed manually or on a scheduled basis.
It is recommended to start with manual runs, focusing on individual directories of your IaC. This approach allows you to resolve drift incrementally before enabling scheduled Drift Detection for the entire repository.
:::
+### Drift Detection Filter
+
+The Drift Detection Filter limits the units that are checked for drift. It is a comma-separated list of path patterns to include, and it can be combined with the [Ignore List](/2.0/reference/pipelines/ignore-list) to further narrow the set of units checked. The filter uses the same syntax as the Ignore List.
+
+A filter can contain multiple patterns separated by the `,` character:
+
+- `,` separates individual patterns
+- `*` matches any character except `/`, for matches within a single directory
+- `**` matches any character, for matches across multiple directories
+
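+For example, the filter `management/**,security/*` checks every unit under `management` at any depth, plus the units directly inside `security`.
+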
### Running manually
+
+
+
You can manually initiate Pipelines Drift Detection by navigating to the Actions tab in your GitHub repository, selecting "Pipelines Drift Detection" from the left-hand menu, and then clicking "Run Workflow."
-By default, the workflow evaluates all units in your repository and generates a pull request on the `drift-detection` branch. To limit drift detection to specific units, specify a path filter. For instance, to target only the `management` directory, use the filter `./management/*` (note the leading `./`).
+By default, the workflow evaluates all units in your repository and generates a pull request on the `drift-detection` branch. To limit drift detection to specific units, specify a path filter. For instance, to target only the `management` directory, use the filter `management/**`.
-
+
+
+
+
+
+You can manually initiate Pipelines Drift Detection by navigating to the Build > Pipelines section in your GitLab project and clicking "New pipeline".
+
+Under Inputs, change the "pipelines_workflow" input to "drift-detection".
+
+By default, the workflow evaluates all units in your repository and generates a merge request on the `drift-detection` branch. To limit drift detection to specific units, set the `pipelines_drift_detection_filter` input. For instance, to target only the `management` directory, set the variable to `management/**`.
+
+Click "New pipeline" to run the workflow.
+
+:::warning
+Running Drift Detection on a large repository can take a long time and use a significant amount of GitLab compute minutes. If the configured [GitLab job duration](https://docs.gitlab.com/ci/pipelines/settings/#set-a-limit-for-how-long-jobs-can-run) is exceeded, the job will be cancelled. We recommend using the Filter input to limit the units that are checked for drift.
+:::
+
+
+
+
+
### Running on a schedule
+
+
+
To enable scheduled runs:
1. Uncomment the `schedule` block in `.github/workflows/pipelines-drift-detection.yml` that contains `- cron: '15 12 * * 1'`.
@@ -28,17 +67,37 @@ To enable scheduled runs:
Running Drift Detection too frequently can consume a significant number of GitHub Action minutes. Begin with a lower frequency and adjust as needed based on your usage patterns.
:::
+
+
+
+To create a scheduled run:
+
+1. Navigate to Build > Pipeline schedules in your GitLab project
+2. Click "Create a new pipeline schedule"
+3. Provide a description and choose the interval for the schedule
+4. Under Inputs, use the "Select inputs" drop-down to add the "pipelines_workflow" input
+5. Change the "pipelines_workflow" input to "drift-detection"
+6. Optionally, add the "pipelines_drift_detection_filter" input and set it to your desired path filter
+7. Click "Create pipeline schedule"
+
+:::caution
+Running Drift Detection too frequently can consume a significant number of GitLab CI/CD minutes. Begin with a lower frequency and adjust as needed based on your usage patterns.
+:::
+
+
+
+
## Resolving Drift
Drift can be addressed by either applying the current IaC configuration from your repository or modifying the modules to match the infrastructure state in the cloud.
-### Merging the pull request
+### Merging the pull/merge request
-Merging the pull request triggers a `terragrunt apply` on the modules identified as having drift.
+Merging the pull/merge request triggers a `terragrunt apply` on the modules identified as having drift.
### Updating units
Alternatively, modify the drifted modules to align them with the desired state and commit the changes to the drift-detection branch. Each change triggers a new `terragrunt plan` for the affected units, which you can review to ensure the drift is resolved.
-When the pull request is merged, Pipelines will execute `terragrunt apply` on all drifted or modified units. If a unit no longer exhibits drift, the apply operation will result in no changes being made to the infrastructure.
+When the pull/merge request is merged, Pipelines will execute `terragrunt apply` on all drifted or modified units. If a unit no longer exhibits drift, the apply operation will result in no changes being made to the infrastructure.
diff --git a/docs/2.0/docs/pipelines/guides/unlock.md b/docs/2.0/docs/pipelines/guides/unlock.md
new file mode 100644
index 0000000000..89e4d683f2
--- /dev/null
+++ b/docs/2.0/docs/pipelines/guides/unlock.md
@@ -0,0 +1,99 @@
+# Unlocking State Locks
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Overview
+
+Occasionally, OpenTofu/Terraform state locks may remain in place if the process holding the lock does not release it properly. This can occur due to unexpected failures, such as crashes or premature termination of jobs.
+
+When this occurs, jobs will fail after a timeout with `Error: Error acquiring the state lock`, and will not succeed until the lock is manually removed.
+
+:::note
+You might have a default lock timeout in your `root.hcl` file that looks like this:
+
+```hcl
+terraform {
+ extra_arguments "retry_lock" {
+ commands = get_terraform_commands_that_need_locking()
+ arguments = ["-lock-timeout=10m"]
+ }
+}
+```
+
+:::
+
+## Unlocking Unit State
+
+When a unit lock is held, the lock can be manually removed with the unlock unit workflow, a convenience wrapper around the OpenTofu/Terraform `force-unlock` command. The workflow requires the following inputs:
+
+### Lock ID
+
+The Lock ID can be found in the logs where a job has failed to acquire the state lock.
+
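+A failed job typically prints output similar to the following (values shown are illustrative):
+
+```text
+Error: Error acquiring the state lock
+
+Lock Info:
+  ID:        ca9c97f1-6315-c0d1-56ef-efb8c8996e8c
+  Path:      my-state-bucket/acme/us-east-1/storage/s3bucket1/tofu.tfstate
+  Operation: OperationTypePlan
+```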
+
+
+In this example the ID is `ca9c97f1-6315-c0d1-56ef-efb8c8996e8c`.
+
+### Unit Path
+
+The Unit Path is the relative path to the directory containing your `terragrunt.hcl` file where the lock is being held.
+
+In the above example the Unit Path is `acme/us-east-1/storage/s3bucket1`.
+
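+With these inputs, the workflow effectively runs the following from the unit directory (a sketch using the example values above):
+
+```bash
+cd acme/us-east-1/storage/s3bucket1
+terragrunt force-unlock ca9c97f1-6315-c0d1-56ef-efb8c8996e8c
+```
+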
+### Stack Path
+
+Optional: When a Unit within a Stack is locked, Pipelines requires the Stack Path to generate the stack before running force-unlock in the Unit.
+
+
+
+
+1. From the Actions Tab, select Pipelines Unlock from the list of workflows on the left.
+2. Select the Run workflow dropdown on the right
+3. Enter the Lock ID and Unit Path values into the form
+4. Select Run workflow
+
+
+
+
+
+
+1. From the Pipelines tab select New Pipeline
+2. Select the `pipelines_workflow` Input and change the Value to `unlock-unit` from the dropdown
+3. Enter the Lock ID and Unit Path into the `pipelines_unlock_unit_lock_id` and `pipelines_unlock_unit_path` inputs
+4. Select New pipeline
+
+
+
+
+
+
+## Unlocking All State
+
+:::warning
+The Unlock All workflow currently only unlocks AWS DynamoDB locks. It does this by attempting to delete the entire `terraform-locks` table in each region.
+:::
+
+In the event that many locks are being held, and it is difficult to obtain the Lock IDs, an Unlock All workflow exists to forcibly remove all locks. Run this workflow with caution.
+
+
+
+
+1. From the Actions Tab, select Pipelines Unlock from the list of workflows on the left.
+2. Select the Run workflow dropdown on the right
+3. Tick the checkbox to forcibly reset all locks
+4. Select Run workflow
+
+
+
+
+
+
+1. From the Pipelines tab select New Pipeline
+2. Select the `pipelines_workflow` Input and change the Value to `unlock-all` from the dropdown
+3. Select New pipeline
+
+
+
+
+
diff --git a/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx
new file mode 100644
index 0000000000..c1f2af5c26
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/addingexistinggitlabrepo.mdx
@@ -0,0 +1,891 @@
+# Bootstrap Pipelines in an Existing GitLab Project
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PersistentCheckbox from '/src/components/PersistentCheckbox';
+import CustomizableValue from '/src/components/CustomizableValue';
+
+This guide provides comprehensive instructions for integrating [Gruntwork Pipelines](https://gruntwork.io/products/pipelines/) into an existing GitLab project with Infrastructure as Code (IaC). This is designed for Gruntwork customers who want to add Pipelines to their current infrastructure projects for streamlined CI/CD management.
+
+To configure Gruntwork Pipelines in an existing GitLab project, complete the following steps (which are explained in detail below):
+
+1. **(If using a self-hosted GitLab instance) Ensure OIDC configuration and JWKS are publicly accessible.**
+2. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage.
+3. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources.
+4. **Configure SCM access** using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs).
+5. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments.
+6. **Create `.gitlab-ci.yml`** to configure your GitLab CI/CD pipeline.
+7. **Commit and push** your changes to activate Pipelines.
+
+## Ensure OIDC configuration and JWKS are publicly accessible
+
+This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step.
+
+1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. an S3 bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWTs generated by your self-hosted instance.
+2. Note the issuer URL (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps.
+
+:::note Progress Checklist
+
+
+
+:::
+
+## Prerequisites
+
+Before starting, ensure you have:
+
+- **An active Gruntwork subscription** with Pipelines access. Verify by checking the [Gruntwork Developer Portal](https://app.gruntwork.io/account) and confirming access to "pipelines" repositories in your GitHub team.
+- **Cloud provider credentials** with permissions to create OIDC providers and IAM roles in accounts where Pipelines will manage infrastructure.
+- **Git installed** locally for cloning and managing your project.
+- **Existing IaC project** with Terragrunt configurations you want to manage with Pipelines (if you are using OpenTofu/Terraform, and want to start using Terragrunt, read the [Quickstart Guide](https://terragrunt.gruntwork.io/docs/getting-started/quick-start)).
+
+## Planning Your Pipelines Setup
+
+Before implementing Pipelines, it's crucial to plan your setup by identifying all the environments and cloud resources you need to manage.
+
+### Identify Your Environments
+
+Review your existing project structure and identify:
+
+1. **All environments** you want to manage with Pipelines (e.g., `dev`, `staging`, `prod`)
+2. **Cloud accounts/subscriptions** associated with each environment
+3. **Directory paths** in your project that contain Terragrunt units for each environment
+4. **Existing OIDC resources** that may already be provisioned in your accounts
+
+:::note Progress Checklist
+
+
+
+
+
+
+:::
+
+### Determine Required OIDC Roles
+
+For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned.
+
+
+
+
+**Required AWS Resources:**
+
+- An OIDC provider for GitLab
+- An IAM role for Pipelines to assume when running Terragrunt plan commands
+- An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+
+
+
+**Required Azure Resources:**
+
+- Entra ID Application for plans with Federated Identity Credential
+- Entra ID Application for applies with Federated Identity Credential
+- Service Principals with appropriate role assignments
+- Storage Account and Container for Terragrunt state storage (if not already existing)
+
+
+
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+## Configuring SCM Access
+
+Pipelines needs the ability to interact with GitLab to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself).
+
+To create machine users for GitLab access, follow our [machine users guide](/2.0/docs/pipelines/installation/viamachineusers) to set up the appropriate Personal Access Tokens (PATs) with the required permissions.
+
+:::note Progress Checklist
+
+
+
+:::
+
+## Bootstrapping Cloud Infrastructure
+
+If your AWS accounts / Azure subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources.
+
+:::tip
+
+If you already have all the resources listed, you can skip this section.
+
+If you have some of them provisioned, but not all, you can decide to either destroy the resources you already have provisioned and recreate them or import them into state. If you are not sure, please contact [Gruntwork support](/support).
+
+:::
+
+### Prepare Your Project
+
+Clone your project to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository) if you haven't already.
+
+:::tip
+
+If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads).
+
+:::
+
+For example:
+
+```bash
+git clone git@gitlab.com:acme/infrastructure-live.git
+cd infrastructure-live
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+To bootstrap your project, we'll use Boilerplate to scaffold it with the IaC code needed to provision the infrastructure Pipelines requires.
+
+The easiest way to install Boilerplate is with `mise`.
+
+:::tip
+
+If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html).
+
+:::
+
+```bash
+mise use -g boilerplate@latest
+```
+
+:::tip
+
+If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions.
+
+```bash
+mise ls-remote boilerplate
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+If you don't already have Terragrunt and OpenTofu installed locally, you can install them using `mise`:
+
+```bash
+mise use -g terragrunt@latest opentofu@latest
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+### Cloud-specific bootstrap instructions
+
+
+
+
+You need the following resources provisioned in every AWS account you want Pipelines to manage infrastructure in:
+
+1. An OpenID Connect (OIDC) provider
+2. An IAM role for Pipelines to assume when running Terragrunt plan commands
+3. An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Use Boilerplate to scaffold bootstrap configurations in your project for each AWS account
+2. Use Terragrunt to provision these resources in your AWS accounts
+3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap Your Project for AWS</h3>
+
+First, confirm that you have a `root.hcl` file in the root of your project that looks something like this:
+
+```hcl title="root.hcl"
+locals {
+ account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl"))
+ state_bucket_name = local.account_hcl.locals.state_bucket_name
+
+ region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl"))
+ aws_region = local.region_hcl.locals.aws_region
+}
+
+remote_state {
+ backend = "s3"
+ generate = {
+ path = "backend.tf"
+ if_exists = "overwrite"
+ }
+ config = {
+ bucket = local.state_bucket_name
+ region = local.aws_region
+ key = "${path_relative_to_include()}/tofu.tfstate"
+ encrypt = true
+ use_lockfile = true
+ }
+}
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provision AWS Bootstrap Resources</h3>
+
+Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.
+
+:::tip
+
+Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.
+
+You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role.
+
+:::
+
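+For example, a minimal sketch using a named AWS profile (the profile name is illustrative):
+
+```bash
+# Authenticate as the target account, then confirm you're in the right account
+export AWS_PROFILE=name-of-account
+aws sts get-caller-identity
+```
+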
+For each account you want to bootstrap, you'll need to run the following commands:
+
+First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global`, where `name-of-account` is the name of the AWS account you want to bootstrap.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+Next, apply the changes to your account.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+
+
+
+The resources you need provisioned in Azure to start managing resources with Pipelines are:
+
+1. An Azure Resource Group for OpenTofu state resources
+ 1. An Azure Storage Account in that resource group for OpenTofu state storage
+ 1. An Azure Storage Container in that storage account for OpenTofu state storage
+2. An Entra ID Application to use for plans
+ 1. A Flexible Federated Identity Credential for the application to authenticate with your project on any branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+ 2. A role assignment for the service principal to access the Azure Storage Account
+3. An Entra ID Application to use for applies
+ 1. A Federated Identity Credential for the application to authenticate with your project on the deploy branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your project is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your project.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Use Boilerplate to scaffold bootstrap configurations in your project for each Azure subscription
+2. Use Terragrunt to provision these resources in your Azure subscription
+3. Finalize Terragrunt configurations using the bootstrap resources we just provisioned
+4. Pull the bootstrap resources into state, now that we have configured a remote state backend
+5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap Your Project for Azure</h3>
+
+For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your project for each subscription:
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time.
+
+:::
+
+:::tip
+
+You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitLabGroupName=acme' \
+ --var 'GitLabRepoName=infrastructure-live' \
+ --var 'GitLabInstanceURL=https://gitlab.com' \
+ --var 'SubscriptionName=dev' \
+ --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \
+ --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \
+ --var 'AzureLocation=East US' \
+ --var 'StateResourceGroupName=pipelines-rg' \
+ --var 'StateStorageAccountName=mysa' \
+ --var 'StateStorageContainerName=tfstate' \
+ --non-interactive
+```
+
+You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.
+
+```yaml title="vars.yml"
+AccountName: dev
+GitLabGroupName: acme
+GitLabRepoName: infrastructure-live
+GitLabInstanceURL: https://gitlab.com
+SubscriptionName: dev
+AzureTenantID: 00000000-0000-0000-0000-000000000000
+AzureSubscriptionID: 11111111-1111-1111-1111-111111111111
+AzureLocation: East US
+StateResourceGroupName: pipelines-rg
+StateStorageAccountName: mysa
+StateStorageContainerName: tfstate
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/gitlab/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provision Azure Bootstrap Resources</h3>
+
+Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription.
+
+If you haven't already, you'll want to authenticate to Azure using the `az` CLI.
+
+```bash
+az login
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+
+To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you export the following environment variables if you haven't already set the values via the `az` CLI:
+
+- `ARM_TENANT_ID`
+- `ARM_SUBSCRIPTION_ID`
+
+For example:
+
+```bash
+export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111"
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+First, make sure that everything is set up correctly by running a plan in the subscription directory.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, apply the changes to your subscription.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply
+```
+
+:::tip
+
+We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state.
+
+:::
+
+:::note Progress Checklist
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Finalizing Terragrunt configurations</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned.
+
+First, edit the `root.hcl` file in the root of your project to leverage the storage account we just provisioned.
+
+If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this:
+
+```hcl title="root.hcl"
+locals {
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl"))
+
+ state_resource_group_name = local.sub_hcl.locals.state_resource_group_name
+ state_storage_account_name = local.sub_hcl.locals.state_storage_account_name
+ state_storage_container_name = local.sub_hcl.locals.state_storage_container_name
+}
+
+remote_state {
+ backend = "azurerm"
+ generate = {
+ path = "backend.tf"
+ if_exists = "overwrite"
+ }
+ config = {
+ resource_group_name = local.state_resource_group_name
+ storage_account_name = local.state_storage_account_name
+ container_name = local.state_storage_container_name
+ key = "${path_relative_to_include()}/tofu.tfstate"
+ }
+}
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+
+Next, finalize the `.gruntwork/environment-<name-of-environment>.hcl` file in the root of your project to reference the IDs for the applications we just provisioned.
+
+You can find the values for `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `name-of-subscription/bootstrap` directory.
+
+```bash
+terragrunt stack output
+```
+
+The relevant bits that you want to extract from the stack output are the following:
+
+```hcl
+bootstrap = {
+ apply_app = {
+ client_id = "33333333-3333-3333-3333-333333333333"
+ }
+ plan_app = {
+ client_id = "44444444-4444-4444-4444-444444444444"
+ }
+}
+```
+
+You can use those values to set `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-<name-of-environment>.hcl` file.
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Pulling the resources into state</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy
+```
+
+:::tip
+
+We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+
+
+
+## Creating `.gruntwork` HCL Configurations
+
+Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your project to tell Pipelines how you plan to organize your infrastructure, and how you plan to have Pipelines authenticate with your cloud provider(s).
+
+### The `repository` block
+
+The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly.
+
+```hcl title=".gruntwork/repository.hcl"
+repository {
+ deploy_branch_name = "main"
+}
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+### The `environment` block
+
+Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block).
+
+For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment.
+
+
+
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ aws_oidc {
+ account_id = "123456789012"
+ plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan"
+ apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page.
+
+:::
+
+:::tip
+
+Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to reuse common AWS configurations.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+:::
+
+
+
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ azure_oidc {
+ tenant_id = "00000000-0000-0000-0000-000000000000"
+ subscription_id = "11111111-1111-1111-1111-111111111111"
+
+ plan_client_id = "33333333-3333-3333-3333-333333333333"
+ apply_client_id = "44444444-4444-4444-4444-444444444444"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+
+:::
+
+
+
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ custom {
+ auth_provider_cmd = "./scripts/custom-auth-prod.sh"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+:::
+
+
+
+
+## Creating `.gitlab-ci.yml`
+
+Create a `.gitlab-ci.yml` file in the root of your project with the following content:
+
+```yaml title=".gitlab-ci.yml"
+include:
+ - project: 'gruntwork-io/gitlab-pipelines-workflows'
+ file: '/workflows/pipelines.yml'
+ ref: 'v1'
+```
+
+:::tip
+
+You can read the [Pipelines GitLab CI Workflow](https://gitlab.com/gruntwork-io/gitlab-pipelines-workflows) to learn how this GitLab CI pipeline calls the Pipelines CLI to run your pipelines.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+## Commit and Push Your Changes
+
+Commit and push your changes to your project.
+
+:::note
+
+You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow before everything is properly configured.
+
+:::
+
+```bash
+git add .
+git commit -m "Add Pipelines configurations and GitLab CI workflow [skip ci]"
+git push
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+🚀 You've successfully added Gruntwork Pipelines to your existing GitLab project!
+
+## Next Steps
+
+You have successfully completed the installation of Gruntwork Pipelines in an existing GitLab project. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) to begin deploying changes.
+
+## Troubleshooting Tips
+
+If you encounter issues during the setup process, here are some common troubleshooting steps:
+
+### Bootstrap Resources Failure
+
+If your bootstrap resource provisioning fails:
+
+
+
+
+
+
+
+### HCL Configuration Issues
+
+If your HCL configurations aren't working as expected:
+
+
+
+
+
+### GitLab CI Pipeline Issues
+
+If your GitLab CI pipeline isn't working as expected:
+
+
+
+
+
+
+
+
+
diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.md b/docs/2.0/docs/pipelines/installation/addingexistingrepo.md
deleted file mode 100644
index 9149fbd80c..0000000000
--- a/docs/2.0/docs/pipelines/installation/addingexistingrepo.md
+++ /dev/null
@@ -1,553 +0,0 @@
-import CustomizableValue from '/src/components/CustomizableValue';
-
-# Adding Gruntwork Pipelines to an existing repository
-
-This guide provides instructions for installing Gruntwork Pipelines in a repository with existing IaC. This guide is for Gruntwork customers looking to integrate Pipelines into their existing repositories for streamlined infrastructure management.
-
-:::info
-
-This process leverages a new configuration paradigm for Pipelines called ["Pipelines Configuration as Code"](/2.0/reference/pipelines/configurations-as-code), introduced in July 2024. This system allows developers to use Gruntwork Pipelines with any folder structure in their IaC repositories. Previously, Pipelines required a specific folder layout to map source control directories to AWS Accounts for authentication.
-
-**As of Q4 2024, this new configuration system does not yet support the [Gruntwork Account Factory](https://docs.gruntwork.io/2.0/docs/accountfactory/concepts/).** If you need both Pipelines and the Account Factory, we recommend [starting with a new repository](/2.0/docs/pipelines/installation/addingnewrepo) or contacting [Gruntwork support](/support) for assistance.
-:::
-
-## Prerequisites
-
-- **Active Gruntwork subscription**: Ensure your account includes access to Pipelines. Verify access by navigating to the "View team in GitHub" option in the [Gruntwork Developer Portal's account page](https://app.gruntwork.io/account) if you are an admin. From the GitHub team UI, search for "pipelines" under the repositories tab to confirm access.
-- **AWS credentials**: You need credentials with permissions to create resources in the AWS account where Pipelines will be deployed. This includes creating an OpenID Connect (OIDC) Provider and AWS Identity and Access Management (IAM) roles for Pipelines to use when deploying infrastructure.
-
-## Setting up the repository
-
-### Account information
-
-Create an `accounts.yml` file in the root directory of your repository with the following content. Replace $$AWS_ACCOUNT_NAME$$, $$AWS_ACCOUNT_ID$$, and $$AWS_ACCOUNT_EMAIL$$ with the appropriate values for the account you are deploying to. Add additional accounts as needed to manage them with Pipelines.
-
- ```yaml title="accounts.yml"
- # required: Name of an account
- $$AWS_ACCOUNT_NAME$$:
- # required: The AWS account ID
- id: "$$AWS_ACCOUNT_ID$$"
- # required: The email address of the account owner
- email: "$$AWS_ACCOUNT_EMAIL$$"
- ```
-
-### Pipelines configurations
-
-Create a file named `.gruntwork/gruntwork.hcl` in the root directory of your repository with the following content. This file is used to configure Pipelines for your repository. Update the specified placeholders with the appropriate values:
-
-- $$ENVIRONMENT_NAME$$: Specify a name that represents the environment being deployed, such as `production`, `staging`, or `development`.
-- $$PATH_TO_ENVIRONMENT$$: Define the root-relative path of the folder in your repository that contains the terragrunt units for the environment you are deploying to. This may be the same as the environment name if there is a directory in the root of the repository that contains all the terragrunt units for the environment.
-- $$AWS_ACCOUNT_ID$$: Enter the AWS Account ID associated with the deployment of Terragrunt units for the specified environment.
-- $$DEPLOY_BRANCH_NAME$$: Specify the branch name used for deployments, such as `main` or `master`. This branch will trigger the Pipelines apply workflow when changes are merged. Pull requests targeting this branch will trigger the Pipelines plan workflow.
-
-
-```hcl title=".gruntwork/gruntwork.hcl"
-# Configurations applicable to the entire repository https://docs.gruntwork.io/2.0/docs/pipelines/installation/addingexistingrepo#repository-blocks
-repository {
- deploy_branch_name = "$$DEPLOY_BRANCH_NAME$$"
-}
-
-aws {
- accounts "all" {
- // Reading the accounts.yml file from the root of the repository
- path = "../accounts.yml"
- }
-}
-
-# Configurations that are applicable to a specific environment within a repository # https://docs.gruntwork.io/2.0/docs/pipelines/installation/addingexistingrepo#environment-blocks
-environment "$$ENVIRONMENT_NAME$$" {
- filter {
- paths = ["$$PATH_TO_ENVIRONMENT$$/*"]
- }
-
- authentication {
- aws_oidc {
- account_id = aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id
- plan_iam_role_arn = "arn:aws:iam::${aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id}:role/pipelines-plan"
- apply_iam_role_arn = "arn:aws:iam::${aws.accounts.all.$$AWS_ACCOUNT_NAME$$.id}:role/pipelines-apply"
- }
- }
-}
-```
-
-The IAM roles mentioned in the unit configuration above will be created in the [Pipelines OpenID Connect (OIDC) Provider and Roles](#pipelines-openid-connectoidc-provider-and-roles) section.
-
-For additional environments, you can add new [environment configurations](/2.0/reference/pipelines/configurations-as-code#environment-configurations). Alternatively, consider using [unit configuration](/2.0/reference/pipelines/configurations-as-code#unit-configurations) for Terragrunt units in your repository that do not align with an environment configuration.
-
-### Pipelines GitHub Actions (GHA) workflow
-
-Pipelines is implemented using a GitHub [reusable workflow](https://docs.github.com/en/actions/sharing-automations/reusing-workflows#creating-a-reusable-workflow). The actual code for Pipelines and its features resides in an external repository, typically [Gruntwork's Pipelines Workflows repository](https://github.com/gruntwork-io/pipelines-workflows/). Your repository references this external workflow rather than containing the implementation itself.
-
-Create a file named `.github/workflows/pipelines.yml` in the root of your repository with the following content:
-
-
-Pipelines GHA workflow file
-
-```yaml title=".github/workflows/pipelines.yml"
-######################################################################################################################
-# INFRASTRUCTURE CI/CD CONFIGURATION
-#
-# This file configures GitHub Actions to implement a CI/CD pipeline for managing infrastructure code.
-#
-# The pipeline defined in this configuration includes the following steps:
-#
-# - For any commit on any branch, identify all Terragrunt modules that have changed between the `HEAD` of the branch and
-# `main`, and run `terragrunt plan` on each of those modules.
-# - For commits to `main`, execute `terragrunt apply` on each of the updated modules.
-#
-######################################################################################################################
-
-name: Pipelines
-run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}"
-on:
- push:
- branches:
- - $$DEPLOY_BRANCH_NAME$$
- paths-ignore:
- # Workflow does not run only if ALL filepaths match the pattern. See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-excluding-paths
- - ".github/**"
- pull_request:
- types:
- - opened
- - synchronize
- - reopened
-
-# Permissions to assume roles and create pull requests
-permissions:
- id-token: write
-
-jobs:
- GruntworkPipelines:
- # https://github.com/gruntwork-io/pipelines-workflows/blob/v3/.github/workflows/pipelines.yml
- uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v3
- secrets:
- PIPELINES_READ_TOKEN: ${{ secrets.PIPELINES_READ_TOKEN }}
-
- PipelinesPassed:
- needs: GruntworkPipelines
- if: always()
- runs-on: ubuntu-latest
- steps:
- - run: |
- echo "::debug::RESULT: $RESULT"
- if [[ $RESULT = "success" ]]; then
- echo "GruntworkPipelines completed successfully!"
- else
- echo "GruntworkPipelines failed!"
- exit 1
- fi
- env:
- RESULT: ${{ needs.GruntworkPipelines.result }}
-```
-
-
-
-### Pipelines OpenID Connect (OIDC) provider and roles
-
-This step involves creating the Infrastructure as Code (IaC) configuration for the [OIDC](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) roles required by Pipelines to deploy infrastructure.
-
-Two roles are needed:
-- `pipelines-plan` for plans
-- `pipelines-apply` for applies
-
-Using two distinct roles upholds the principle of least privilege. The `pipelines-plan` role is used during pull request creation or updates and requires primarily read-only permissions. The `pipelines-apply` role, used during pull request merges, requires read/write permissions. Additionally, these roles have different IAM trust policies. The `apply` role only trusts the deploy branch, while the `plan` role trusts all branches.
-
-This step requires AWS credentials with sufficient permissions to create the necessary IAM resources that Pipelines will assume when deploying infrastructure.
-
-#### Create the Terragrunt units
-
-Within the **$$PATH_TO_ENVIRONMENT$$/_global** directory, create the Terragrunt unit files as described below, updating the following values as needed:
-
-- $$AWS_STATE_BUCKET_PATTERN$$: Specify the state bucket name or pattern of the state bucket(s) to be used for the environment. The Pipeline roles must have permissions to access the state bucket for storing and retrieving state files.
-- $$AWS_DYNAMO_DB_TABLE$$: Specify the name of the DynamoDB table used for state locking.
-- $$INFRASTRUCTURE_LIVE_REPO_NAME$$: Provide the exact name of the repository where Pipelines is being configured.
-
-
-OIDC Provider
-
-```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/github-actions-openid-connect-provider/terragrunt.hcl"
-terraform {
- source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-openid-connect-provider?ref=v0.74.5"
-}
-
-# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components.
-include "root" {
- path = find_in_parent_folders()
-}
-
-inputs = {
- allowed_organizations = [
- "$$GITHUB_ORG_NAME$$",
- ]
-}
-```
-
-
-
-
-Pipelines Plan
-
-```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/pipelines-plan-role/terragrunt.hcl"
-terraform {
- source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-iam-role?ref=v0.74.5"
-}
-
-# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components.
-include "root" {
- path = find_in_parent_folders()
-}
-
-# The OIDC IAM roles for GitHub Actions require an IAM OpenID Connect (OIDC) Provider to be provisioned for each account.
-# The underlying module used in `envcommon` is capable of creating the OIDC provider. Since multiple OIDC roles are required,
-# a dedicated module is used, and all roles depend on its output
-dependency "github-actions-openid-connect-provider" {
- config_path = "../github-actions-openid-connect-provider"
-
- # Configure mock outputs for the `validate` command that are returned when there are no outputs available (e.g the
- # module hasn't been applied yet.
- mock_outputs_allowed_terraform_commands = ["validate", "plan"]
- mock_outputs_merge_strategy_with_state = "shallow"
- mock_outputs = {
- arn = "known_after_apply"
- url = "token.actions.githubusercontent.com"
- }
-}
-
-locals {
- state_bucket_pattern = lower("$$AWS_STATE_BUCKET_PATTERN$$")
-}
-
-inputs = {
- github_actions_openid_connect_provider_arn = dependency.github-actions-openid-connect-provider.outputs.arn
- github_actions_openid_connect_provider_url = dependency.github-actions-openid-connect-provider.outputs.url
-
- allowed_sources_condition_operator = "StringLike"
-
- allowed_sources = {
- "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" : ["*"]
- }
-
- custom_iam_policy_name = "pipelines-plan-oidc-policy"
- iam_role_name = "pipelines-plan"
-
- # Policy based on these docs:
- # https://terragrunt.gruntwork.io/docs/features/aws-auth/#aws-iam-policies
- iam_policy = {
- # State permissions
- "DynamoDBLocksTableAccess" = {
- effect = "Allow"
- actions = [
- "dynamodb:PutItem",
- "dynamodb:GetItem",
- "dynamodb:DescribeTable",
- "dynamodb:DeleteItem",
- "dynamodb:CreateTable",
- ]
- resources = ["arn:aws:dynamodb:*:*:table/$$AWS_DYNAMO_DB_TABLE$$"]
- }
- "S3StateBucketAccess" = {
- effect = "Allow"
- actions = [
- "s3:ListBucket",
- "s3:GetBucketVersioning",
- "s3:GetBucketAcl",
- "s3:GetBucketLogging",
- "s3:CreateBucket",
- "s3:PutBucketPublicAccessBlock",
- "s3:PutBucketTagging",
- "s3:PutBucketPolicy",
- "s3:PutBucketVersioning",
- "s3:PutEncryptionConfiguration",
- "s3:PutBucketAcl",
- "s3:PutBucketLogging",
- "s3:GetEncryptionConfiguration",
- "s3:GetBucketPolicy",
- "s3:GetBucketPublicAccessBlock",
- "s3:PutLifecycleConfiguration",
- "s3:PutBucketOwnershipControls",
- ]
- resources = [
- "arn:aws:s3:::${local.state_bucket_pattern}",
- ]
- }
- "S3StateBucketObjectAccess" = {
- effect = "Allow"
- actions = [
- "s3:PutObject",
- "s3:GetObject"
- ]
- resources = [
- "arn:aws:s3:::${local.state_bucket_pattern}/*",
- ]
- }
- }
-}
-```
-
-
-
-
-Pipelines Apply
-
-
-
-```hcl title="$$PATH_TO_ENVIRONMENT$$/_global/pipelines-apply-role/terragrunt.hcl"
-terraform {
- source = "git@github.com:gruntwork-io/terraform-aws-security.git//modules/github-actions-iam-role?ref=v0.74.5"
-}
-
-# Include the root `terragrunt.hcl` configuration, which has settings common across all environments & components.
-include "root" {
- path = find_in_parent_folders()
-}
-
-# The OIDC IAM roles for GitHub Actions require an IAM OpenID Connect (OIDC) Provider to be provisioned for each account.
-# The underlying module used in `envcommon` is capable of creating the OIDC provider. Since multiple OIDC roles are required,
-# a dedicated module is used, and all roles depend on its output.
-dependency "github-actions-openid-connect-provider" {
- config_path = "../github-actions-openid-connect-provider"
-
- # Configure mock outputs for the `validate` command that are returned when there are no outputs available (e.g the
- # module hasn't been applied yet.
- mock_outputs_allowed_terraform_commands = ["validate", "plan"]
- mock_outputs_merge_strategy_with_state = "shallow"
- mock_outputs = {
- arn = "known_after_apply"
- url = "token.actions.githubusercontent.com"
- }
-}
-
-locals {
- # Automatically load account-level variables
- state_bucket_pattern = lower("$$AWS_STATE_BUCKET_PATTERN$$")
-}
-
-inputs = {
- github_actions_openid_connect_provider_arn = dependency.github-actions-openid-connect-provider.outputs.arn
- github_actions_openid_connect_provider_url = dependency.github-actions-openid-connect-provider.outputs.url
-
- allowed_sources = {
- "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" : ["$$DEPLOY_BRANCH_NAME$$"]
- }
-
- # Policy for OIDC role assumed from GitHub in the "$$GITHUB_ORG_NAME$$/$$INFRASTRUCTURE_LIVE_REPO_NAME$$" repo
- custom_iam_policy_name = "pipelines-apply-oidc-policy"
- iam_role_name = "pipelines-apply"
-
- # Policy based on these docs:
- # https://terragrunt.gruntwork.io/docs/features/aws-auth/#aws-iam-policies
- iam_policy = {
- "IamPassRole" = {
- resources = ["*"]
- actions = ["iam:*"]
- effect = "Allow"
- }
- "IamCreateRole" = {
- resources = [
- "arn:aws:iam::*:role/aws-service-role/orgsdatasync.servicecatalog.amazonaws.com/AWSServiceRoleForServiceCatalogOrgsDataSync"
- ]
- actions = ["iam:CreateServiceLinkedRole"]
- effect = "Allow"
- }
- "S3BucketAccess" = {
- resources = ["*"]
- actions = ["s3:*"]
- effect = "Allow"
- }
- "DynamoDBLocksTableAccess" = {
- resources = ["arn:aws:dynamodb:*:*:table/terraform-locks"]
- actions = ["dynamodb:*"]
- effect = "Allow"
- }
- "OrganizationsDeployAccess" = {
- resources = ["*"]
- actions = ["organizations:*"]
- effect = "Allow"
- }
- "ControlTowerDeployAccess" = {
- resources = ["*"]
- actions = ["controltower:*"]
- effect = "Allow"
- }
- "IdentityCenterDeployAccess" = {
- resources = ["*"]
- actions = ["sso:*", "ds:*", "sso-directory:*"]
- effect = "Allow"
- }
- "ECSDeployAccess" = {
- resources = ["*"]
- actions = ["ecs:*"]
- effect = "Allow"
- }
- "ACMDeployAccess" = {
- resources = ["*"]
- actions = ["acm:*"]
- effect = "Allow"
- }
- "AutoScalingDeployAccess" = {
- resources = ["*"]
- actions = ["autoscaling:*"]
- effect = "Allow"
- }
- "CloudTrailDeployAccess" = {
- resources = ["*"]
- actions = ["cloudtrail:*"]
- effect = "Allow"
- }
- "CloudWatchDeployAccess" = {
- resources = ["*"]
- actions = ["cloudwatch:*", "logs:*"]
- effect = "Allow"
- }
- "CloudFrontDeployAccess" = {
- resources = ["*"]
- actions = ["cloudfront:*"]
- effect = "Allow"
- }
- "ConfigDeployAccess" = {
- resources = ["*"]
- actions = ["config:*"]
- effect = "Allow"
- }
- "EC2DeployAccess" = {
- resources = ["*"]
- actions = ["ec2:*"]
- effect = "Allow"
- }
- "ECRDeployAccess" = {
- resources = ["*"]
- actions = ["ecr:*"]
- effect = "Allow"
- }
- "ELBDeployAccess" = {
- resources = ["*"]
- actions = ["elasticloadbalancing:*"]
- effect = "Allow"
- }
- "GuardDutyDeployAccess" = {
- resources = ["*"]
- actions = ["guardduty:*"]
- effect = "Allow"
- }
- "IAMDeployAccess" = {
- resources = ["*"]
- actions = ["iam:*", "access-analyzer:*"]
- effect = "Allow"
- }
- "KMSDeployAccess" = {
- resources = ["*"]
- actions = ["kms:*"]
- effect = "Allow"
- }
- "LambdaDeployAccess" = {
- resources = ["*"]
- actions = ["lambda:*"]
- effect = "Allow"
- }
- "Route53DeployAccess" = {
- resources = ["*"]
- actions = ["route53:*", "route53domains:*", "route53resolver:*"]
- effect = "Allow"
- }
- "SecretsManagerDeployAccess" = {
- resources = ["*"]
- actions = ["secretsmanager:*"]
- effect = "Allow"
- }
- "SNSDeployAccess" = {
- resources = ["*"]
- actions = ["sns:*"]
- effect = "Allow"
- }
- "SQSDeployAccess" = {
- resources = ["*"]
- actions = ["sqs:*"]
- effect = "Allow"
- }
- "SecurityHubDeployAccess" = {
- resources = ["*"]
- actions = ["securityhub:*"]
- effect = "Allow"
- }
- "MacieDeployAccess" = {
- resources = ["*"]
- actions = ["macie2:*"]
- effect = "Allow"
- }
- "ServiceQuotaDeployAccess" = {
- resources = ["*"]
- actions = ["servicequotas:*"]
- effect = "Allow"
- }
- "EKSAccess" = {
- resources = ["*"]
- actions = ["eks:*"]
- effect = "Allow"
- }
- "EventBridgeAccess" = {
- resources = ["*"]
- actions = ["events:*"]
- effect = "Allow"
- }
- "ApplicationAutoScalingAccess" = {
- resources = ["*"]
- actions = ["application-autoscaling:*"]
- effect = "Allow"
- }
- "ApiGatewayAccess" = {
- resources = ["*"]
- actions = ["apigateway:*"]
- effect = "Allow"
- }
- }
-}
-```
-
-
-
-
-
-
-:::tip
-
-The permissions in the files above are provided as examples and should be adjusted to align with the specific types of infrastructure managed in the repository. This ensures that Pipelines can execute the required actions to deploy your infrastructure effectively.
-
-Additionally, note that the IAM permissions outlined above do not include permissions to modify the role itself, for security purposes.
-
-:::
-
-Repeat this step for each environment you would like to manage with Pipelines.
-
-#### Create the OIDC resources
-
-Use your personal AWS access to execute the following commands to deploy the infrastructure for the Terragrunt units created in the previous step. Repeat this process for each account you plan to manage with Pipelines.
-
- ```bash
- cd $$PATH_TO_ENVIRONMENT$$/_global
- terragrunt run-all plan
- ```
-
-Review the plan output, and if everything appears correct, proceed to apply the changes.
-
-
- ```bash
- terragrunt run-all apply
- ```
-
-:::tip
-
-If you encounter issues with the plan or apply steps due to the presence of other resources in the *_global* folder, you can run the plan/apply steps individually for the Terragrunt units. Start with the `github-actions-openid-connect-provider` unit, as other units depend on it.
-
-:::
-
-#### Commit and push the changes
-
-Create a new branch and commit all changes, including **`[skip ci]`** in the commit message to prevent triggering the Pipelines workflow. Push the changes to the repository, create a Pull Request, and merge the changes into the branch specified in the `.github/workflows/pipelines.yml` file.
-
-## Enable GitHub authentication for pipelines
-
-Follow the instructions in [Authenticating via GitHub App](/2.0/docs/pipelines/installation/viagithubapp) to enable GitHub authentication for Pipelines in your repository using the Gruntwork.io GitHub App. This is the recommended authentication method. Alternatively, you can [Authenticate via Machine Users](/2.0/docs/pipelines/installation/viamachineusers) if preferred.
-
-## Next steps
-
-You have successfully completed the installation of Gruntwork Pipelines in an existing repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md) to begin deploying changes.
diff --git a/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx
new file mode 100644
index 0000000000..5078e7e77c
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/addingexistingrepo.mdx
@@ -0,0 +1,885 @@
+# Bootstrap Pipelines in an Existing Repository
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PersistentCheckbox from '/src/components/PersistentCheckbox';
+
+This guide provides comprehensive instructions for integrating [Gruntwork Pipelines](https://gruntwork.io/products/pipelines/) into an existing repository with Infrastructure as Code (IaC). This is designed for Gruntwork customers who want to add Pipelines to their current infrastructure repositories for streamlined CI/CD management.
+
+To configure Gruntwork Pipelines in an existing repository, complete the following steps (which are explained in detail below):
+
+1. **Plan your Pipelines setup** by identifying all environments and cloud accounts/subscriptions you need to manage.
+2. **Bootstrap core infrastructure** in accounts/subscriptions that don't already have the required OIDC and state management resources.
+3. **Configure SCM access** using either the [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) or [machine users](https://docs.github.com/en/get-started/learning-about-github/types-of-github-accounts#user-accounts).
+4. **Create `.gruntwork` HCL configurations** to tell Pipelines how to authenticate and organize your environments.
+5. **Create `.github/workflows/pipelines.yml`** to configure your GitHub Actions workflow.
+6. **Commit and push** your changes to activate Pipelines.
+
+## Prerequisites
+
+Before starting, ensure you have:
+
+- **An active Gruntwork subscription** with Pipelines access. Verify by checking the [Gruntwork Developer Portal](https://app.gruntwork.io/account) and confirming access to "pipelines" repositories in your GitHub team.
+- **Cloud provider credentials** with permissions to create OIDC providers and IAM roles in accounts where Pipelines will manage infrastructure.
+- **Git installed** locally for cloning and managing your repository.
+- **Existing IaC repository** with Terragrunt configurations you want to manage with Pipelines (if you are using OpenTofu/Terraform and want to start using Terragrunt, read the [Quickstart Guide](https://terragrunt.gruntwork.io/docs/getting-started/quick-start)).
+
+## Planning Your Pipelines Setup
+
+Before implementing Pipelines, it's crucial to plan your setup by identifying all the environments and cloud resources you need to manage.
+
+### Identify Your Environments
+
+Review your existing repository structure and identify:
+
+1. **All environments** you want to manage with Pipelines (e.g., `dev`, `staging`, `prod`)
+2. **Cloud accounts/subscriptions** associated with each environment
+3. **Directory paths** in your repository that contain Terragrunt units for each environment
+4. **Existing OIDC resources** that may already be provisioned in your accounts
+
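+For example, a repository organized by environment might look like the following (a hypothetical layout; your directory names will differ):
+
+```text
+infrastructure-live/
+├── dev/
+│   └── ...
+├── staging/
+│   └── ...
+└── prod/
+    └── ...
+```
+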
+:::note Progress Checklist
+
+
+
+
+
+
+:::
+
+### Determine Required OIDC Roles
+
+For each AWS Account / Azure Subscription you want to manage, you might already have some or all of the following resources provisioned.
+
+<Tabs>
+<TabItem value="aws" label="AWS">
+
+**Required AWS Resources:**
+
+- An OIDC provider for GitHub Actions
+- An IAM role for Pipelines to assume when running Terragrunt plan commands
+- An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+</TabItem>
+<TabItem value="azure" label="Azure">
+
+**Required Azure Resources:**
+
+- Entra ID Application for plans with Federated Identity Credential
+- Entra ID Application for applies with Federated Identity Credential
+- Service Principals with appropriate role assignments
+- Storage Account and Container for Terragrunt state storage (if not already existing)
+
+</TabItem>
+</Tabs>
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+## Configuring SCM Access
+
+Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself).
+
+There are two ways to configure SCM access for Pipelines:
+
+1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users).
+2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers) (recommended for GitHub users who cannot use the GitHub App).
+
+:::note Progress Checklist
+
+
+
+:::
+
+## Bootstrapping Cloud Infrastructure
+
+If your AWS accounts / Azure subscriptions don't already have all the required OIDC and state management resources, you'll need to bootstrap them. This section provides the infrastructure code needed to set up these resources.
+
+:::tip
+
+If you already have all the resources listed, you can skip this section.
+
+If you have some of them provisioned, but not all, you can either destroy and recreate the resources you already have, or import them into state. If you are not sure, please contact [Gruntwork support](/support).
+
+:::
+
+### Prepare Your Repository
+
+Clone your repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository) if you haven't already.
+
+:::tip
+
+If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads).
+
+:::
+
+For example:
+
+```bash
+git clone git@github.com:acme/infrastructure-live.git
+cd infrastructure-live
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+To bootstrap your repository, we'll use Boilerplate to scaffold it with the IaC needed to provision the infrastructure required for Pipelines to function.
+
+The easiest way to install Boilerplate is with `mise`.
+
+:::tip
+
+If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html).
+
+:::
+
+```bash
+mise use -g boilerplate@latest
+```
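+
+To confirm the install worked, you can check the version (any recent version is fine):
+
+```bash
+boilerplate --version
+```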
+
+:::tip
+
+If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions.
+
+```bash
+mise ls-remote boilerplate
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+If you don't already have Terragrunt and OpenTofu installed locally, you can install them using `mise`:
+
+```bash
+mise use -g terragrunt@latest opentofu@latest
+```
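+
+You can verify both tools are available with:
+
+```bash
+terragrunt --version
+tofu --version
+```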
+
+:::note Progress Checklist
+
+
+
+:::
+
+### Cloud-specific bootstrap instructions
+
+<Tabs>
+<TabItem value="aws" label="AWS">
+
+The following resources must be provisioned in every AWS account you want Pipelines to manage infrastructure in:
+
+1. An OpenID Connect (OIDC) provider
+2. An IAM role for Pipelines to assume when running Terragrunt plan commands
+3. An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your repository.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Use Boilerplate to scaffold bootstrap configurations in your repository for each AWS account
+2. Use Terragrunt to provision these resources in your AWS accounts
+3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap Your Repository for AWS</h3>
+
+First, confirm that you have a `root.hcl` file in the root of your repository that looks something like this:
+
+```hcl title="root.hcl"
+locals {
+ account_hcl = read_terragrunt_config(find_in_parent_folders("account.hcl"))
+ state_bucket_name = local.account_hcl.locals.state_bucket_name
+
+ region_hcl = read_terragrunt_config(find_in_parent_folders("region.hcl"))
+ aws_region = local.region_hcl.locals.aws_region
+}
+
+remote_state {
+ backend = "s3"
+ generate = {
+ path = "backend.tf"
+ if_exists = "overwrite"
+ }
+ config = {
+ bucket = local.state_bucket_name
+ region = local.aws_region
+ key = "${path_relative_to_include()}/tofu.tfstate"
+ encrypt = true
+ use_lockfile = true
+ }
+}
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+  contents = <<EOF
+provider "aws" {
+  region = "${local.aws_region}"
+}
+EOF
+}
+```
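+
+The `account.hcl` and `region.hcl` files read by the `locals` above are expected to live in each account and region directory. A minimal sketch, assuming the usual account/region layout and a hypothetical state bucket name:
+
+```hcl title="name-of-account/account.hcl"
+locals {
+  # Name of the S3 bucket used for OpenTofu state in this account (hypothetical)
+  state_bucket_name = "my-state-bucket"
+}
+```
+
+```hcl title="name-of-account/us-east-1/region.hcl"
+locals {
+  aws_region = "us-east-1"
+}
+```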
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provision AWS Bootstrap Resources</h3>
+
+Once you've scaffolded out the accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.
+
+:::tip
+
+Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.
+
+You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role.
+
+:::
+
+For each account you want to bootstrap, you'll need to run the following commands:
+
+First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global`, where `name-of-account` is the name of the AWS account you want to bootstrap.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+Next, apply the changes to your account.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+</TabItem>
+<TabItem value="azure" label="Azure">
+
+To start managing infrastructure with Pipelines, the following resources must be provisioned in your Azure subscription:
+
+1. An Azure Resource Group for OpenTofu state resources
+ 1. An Azure Storage Account in that resource group for OpenTofu state storage
+ 1. An Azure Storage Container in that storage account for OpenTofu state storage
+2. An Entra ID Application to use for plans
+ 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+ 2. A role assignment for the service principal to access the Azure Storage Account
+3. An Entra ID Application to use for applies
+ 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your repository.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Use Boilerplate to scaffold bootstrap configurations in your repository for each Azure subscription
+2. Use Terragrunt to provision these resources in your Azure subscription
+3. Finalize the Terragrunt configurations using the bootstrap resources we just provisioned
+4. Pull the bootstrap resources into state, now that we have configured a remote state backend
+5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap Your Repository for Azure</h3>
+
+For each Azure subscription that needs bootstrapping, we'll use Boilerplate to scaffold the necessary content. Run this command from the root of your repository for each subscription:
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+You'll need to run this boilerplate command once for each Azure subscription you want to manage with Pipelines. Boilerplate will prompt you for subscription-specific values each time.
+
+:::
+
+:::tip
+
+You can reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitHubOrgName=acme' \
+ --var 'GitHubRepoName=infrastructure-live' \
+ --var 'SubscriptionName=dev' \
+ --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \
+ --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \
+ --var 'AzureLocation=East US' \
+ --var 'StateResourceGroupName=pipelines-rg' \
+ --var 'StateStorageAccountName=mysa' \
+ --var 'StateStorageContainerName=tfstate' \
+ --non-interactive
+```
+
+You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.
+
+```yaml title="vars.yml"
+AccountName: dev
+GitHubOrgName: acme
+GitHubRepoName: infrastructure-live
+SubscriptionName: dev
+AzureTenantID: 00000000-0000-0000-0000-000000000000
+AzureSubscriptionID: 11111111-1111-1111-1111-111111111111
+AzureLocation: East US
+StateResourceGroupName: pipelines-rg
+StateStorageAccountName: my-storage-account
+StateStorageContainerName: tfstate
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provision Azure Bootstrap Resources</h3>
+
+Once you've scaffolded out the subscriptions you want to bootstrap, you can use Terragrunt to provision the resources in your Azure subscription.
+
+If you haven't already, you'll want to authenticate to Azure using the `az` CLI.
+
+```bash
+az login
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+
+To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you export the following environment variables if you haven't already set them via the `az` CLI:
+
+- `ARM_TENANT_ID`
+- `ARM_SUBSCRIPTION_ID`
+
+For example:
+
+```bash
+export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111"
+```
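+
+You can confirm which tenant and subscription the CLI is targeting (they should match the values you exported):
+
+```bash
+az account show --query '{tenantId: tenantId, id: id}'
+```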
+
+:::note Progress Checklist
+
+
+
+:::
+
+First, make sure that everything is set up correctly by running a plan in the subscription directory.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, apply the changes to your subscription.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply
+```
+
+:::tip
+
+We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state.
+
+:::
+
+:::note Progress Checklist
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Finalizing Terragrunt configurations</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned.
+
+First, edit the `root.hcl` file in the root of your repository to leverage the storage account we just provisioned.
+
+If your `root.hcl` file doesn't already have a remote state backend configuration, you'll need to add one that looks like this:
+
+```hcl title="root.hcl"
+locals {
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl"))
+
+ state_resource_group_name = local.sub_hcl.locals.state_resource_group_name
+ state_storage_account_name = local.sub_hcl.locals.state_storage_account_name
+ state_storage_container_name = local.sub_hcl.locals.state_storage_container_name
+}
+
+remote_state {
+ backend = "azurerm"
+ generate = {
+ path = "backend.tf"
+ if_exists = "overwrite"
+ }
+ config = {
+ resource_group_name = local.state_resource_group_name
+ storage_account_name = local.state_storage_account_name
+ container_name = local.state_storage_container_name
+ key = "${path_relative_to_include()}/tofu.tfstate"
+ }
+}
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+  contents = <<EOF
+provider "azurerm" {
+  features {}
+}
+EOF
+}
+```
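+
+The `sub.hcl` file read by the `locals` above holds the per-subscription state settings. A minimal sketch, using the sample values from the Boilerplate variables earlier (the file's location at the subscription directory root is an assumption):
+
+```hcl title="dev/sub.hcl"
+locals {
+  state_resource_group_name    = "pipelines-rg"
+  state_storage_account_name   = "mysa"
+  state_storage_container_name = "tfstate"
+}
+```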
+
+Next, finalize the `.gruntwork/environment-<name>.hcl` file in the root of your repository to reference the IDs of the applications we just provisioned.
+
+You can find the values for `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `bootstrap` directory (`name-of-subscription/bootstrap`).
+
+```bash
+terragrunt stack output
+```
+
+The relevant bits that you want to extract from the stack output are the following:
+
+```hcl
+bootstrap = {
+  apply_app = {
+    client_id = "44444444-4444-4444-4444-444444444444"
+  }
+  plan_app = {
+    client_id = "33333333-3333-3333-3333-333333333333"
+  }
+}
+```
+
+You can use those values to set `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-<name>.hcl` file.
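+
+For example, with the stack output above, the `azure_oidc` settings inside your environment's `authentication` block would look like this (a sketch; your tenant and subscription IDs will differ):
+
+```hcl title=".gruntwork/environment-<name>.hcl"
+# Inside: environment "<name>" { authentication { ... } }
+azure_oidc {
+  tenant_id       = "00000000-0000-0000-0000-000000000000"
+  subscription_id = "11111111-1111-1111-1111-111111111111"
+
+  # Client IDs taken from the `terragrunt stack output` above
+  plan_client_id  = "33333333-3333-3333-3333-333333333333"
+  apply_client_id = "44444444-4444-4444-4444-444444444444"
+}
+```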
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Pulling the resources into state</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can pull the resources into state using the storage account we just provisioned.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy
+```
+
+:::tip
+
+We're adding the `-force-copy` flag here to avoid any issues with OpenTofu waiting for an interactive prompt to copy up local state.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+</TabItem>
+</Tabs>
+
+## Creating `.gruntwork` HCL Configurations
+
+Create [HCL configurations](/2.0/reference/pipelines/configurations-as-code/) in the `.gruntwork` directory in the root of your repository to tell Pipelines how your infrastructure is organized and how it should authenticate with your cloud provider(s).
+
+### The `repository` block
+
+The core configuration that you'll want to start with is the `repository` block. This block tells Pipelines which branch has the "live" infrastructure you want provisioned. When you merge IaC to this branch, Pipelines will be triggered to update your infrastructure accordingly.
+
+```hcl title=".gruntwork/repository.hcl"
+repository {
+ deploy_branch_name = "main"
+}
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+### The `environment` block
+
+Next, you'll want to define the environments you want to manage with Pipelines using the [`environment` block](/2.0/reference/pipelines/configurations-as-code/api#environment-block).
+
+For each environment, you'll want to define a [`filter` block](/2.0/reference/pipelines/configurations-as-code/api#filter-block) that tells Pipelines which units are part of that environment. You'll also want to define an [`authentication` block](/2.0/reference/pipelines/configurations-as-code/api#authentication-block) that tells Pipelines how to authenticate with your cloud provider(s) for that environment.
+
+<Tabs>
+<TabItem value="aws" label="AWS">
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ aws_oidc {
+ account_id = "123456789012"
+ plan_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-plan"
+ apply_iam_role_arn = "arn:aws:iam::123456789012:role/pipelines-apply"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines authenticates to AWS in the [Authenticating to AWS](/2.0/docs/pipelines/concepts/cloud-auth/aws) page.
+
+:::
+
+:::tip
+
+Check out the [aws block](/2.0/reference/pipelines/configurations-as-code/#aws-blocks) for more information on how to configure Pipelines to reuse common AWS configurations.
+
+:::
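+
+For instance, if you keep account metadata in a shared YAML file, an `aws` block can load it once so environments reference account IDs by name rather than hard-coding them. A minimal sketch (the file name and layout are assumptions):
+
+```hcl title=".gruntwork/aws.hcl"
+aws {
+  accounts "all" {
+    # Path to a YAML file describing your AWS accounts, relative to this file
+    path = "../accounts.yml"
+  }
+}
+```
+
+Environment blocks can then reference account IDs by name, e.g. `aws.accounts.all.<account-name>.id`.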
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+:::
+
+</TabItem>
+<TabItem value="azure" label="Azure">
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ azure_oidc {
+ tenant_id = "00000000-0000-0000-0000-000000000000"
+ subscription_id = "11111111-1111-1111-1111-111111111111"
+
+ plan_client_id = "33333333-3333-3333-3333-333333333333"
+ apply_client_id = "44444444-4444-4444-4444-444444444444"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines authenticates to Azure in the [Authenticating to Azure](/2.0/docs/pipelines/concepts/cloud-auth/azure) page.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+
+:::
+
+</TabItem>
+<TabItem value="custom" label="Custom">
+
+```hcl title=".gruntwork/environment-production.hcl"
+environment "production" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ custom {
+ auth_provider_cmd = "./scripts/custom-auth-prod.sh"
+ }
+ }
+}
+```
+
+:::tip
+
+Learn more about how Pipelines can authenticate with custom authentication in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) page.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+
+
+
+:::
+
+</TabItem>
+</Tabs>
+
+## Creating `.github/workflows/pipelines.yml`
+
+Create a `.github/workflows/pipelines.yml` file in the root of your repository with the following content:
+
+```yaml title=".github/workflows/pipelines.yml"
+name: Pipelines
+run-name: "[GWP]: ${{ github.event.commits[0].message || github.event.pull_request.title || 'No commit message' }}"
+on:
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - ".github/**"
+ pull_request:
+ types:
+ - opened
+ - synchronize
+ - reopened
+ paths-ignore:
+ - ".github/**"
+
+# Permissions to assume roles and create pull requests
+permissions:
+ id-token: write
+ contents: write
+ pull-requests: write
+
+jobs:
+ GruntworkPipelines:
+ uses: gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@v4
+```
+
+:::tip
+
+You can read the [Pipelines GitHub Actions Workflow](https://github.com/gruntwork-io/pipelines-workflows/blob/main/.github/workflows/pipelines.yml) to learn how this GitHub Actions workflow calls the Pipelines CLI to run your pipelines.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+## Commit and Push Your Changes
+
+Commit and push your changes to your repository.
+
+:::note
+
+You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow before everything is properly configured.
+
+:::
+
+```bash
+git add .
+git commit -m "Add Pipelines configurations and GitHub Actions workflow [skip ci]"
+git push
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+🚀 You've successfully added Gruntwork Pipelines to your existing repository!
+
+## Next Steps
+
+You have successfully completed the installation of Gruntwork Pipelines in an existing repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) to begin deploying changes.
+
+## Troubleshooting Tips
+
+If you encounter issues during the setup process, here are some common troubleshooting steps:
+
+### Bootstrap Resources Failure
+
+If your bootstrap resource provisioning fails:
+
+
+
+
+
+
+
+### HCL Configuration Issues
+
+If your HCL configurations aren't working as expected:
+
+
+
+
+
+### GitHub Actions Workflow Issues
+
+If your GitHub Actions workflow isn't working as expected:
+
+
+
+
+
+
+
+
diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md
deleted file mode 100644
index 367ba1d8fd..0000000000
--- a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.md
+++ /dev/null
@@ -1,192 +0,0 @@
-import CustomizableValue from '/src/components/CustomizableValue';
-
-# Adding Pipelines to a GitLab Project
-
-This guide walks you through the process of adding Gruntwork Pipelines to a GitLab project. By the end, you'll have a fully configured GitLab CI/CD pipeline that can deploy infrastructure changes automatically.
-
-## Prerequisites
-
-Before you begin, make sure you have:
-
-- Basic familiarity with Git, GitLab, and infrastructure as code concepts
-- Access to one (or many) AWS account(s) where you have permission to create IAM roles and OIDC providers
-- Completed the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) and setup a machine user with appropriate PAT tokens
-- Local access to Gruntwork's GitHub repositories, specifically [boilerplate](https://github.com/gruntwork-io/boilerplate) and the [architecture catalog](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/)
-
-:::info
-
-**For custom GitLab instances only**: You must [fork](https://docs.gitlab.com/user/project/repository/forking_workflow/#create-a-fork) Gruntwork's public [Pipelines workflow project](https://gitlab.com/gruntwork-io/pipelines-workflows) into your own GitLab instance.
-
-This is necessary because Gruntwork Pipelines uses [GitLab CI/CD components](/2.0/docs/pipelines/architecture/ci-workflows), and GitLab requires components to reside within the [same GitLab instance as the project referencing them](https://docs.gitlab.com/ci/components/#use-a-component).
-
-When creating the fork, we recommend configuring it as a public mirror of the original Gruntwork project and ensuring that tags are included.
-:::
-
-## Setup Process Overview
-
-Setting up Gruntwork Pipelines for GitLab involves these main steps:
-
-(prerequisite) Complete the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab)
-
-1. [Authorize Your GitLab Group with Gruntwork](#step-1-authorize-your-gitlab-group-with-gruntwork)
-2. [Install required tools (mise, boilerplate)](#step-2-install-required-tools)
-3. [Install Gruntwork Pipelines in Your Repository](#step-3-install-gruntwork-pipelines-in-your-repository)
-4. [Install AWS OIDC Provider and IAM Roles for Pipelines](#step-4-install-aws-oidc-provider-and-iam-roles-for-pipelines)
-5. [Complete the setup](#step-5-complete-the-setup)
-
-## Detailed Setup Instructions
-
-### Step 0: Ensure OIDC configuration and JWKS are publicly accessible
-
-This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step.
-
-1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. S3 Bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWT's generated by your custom instance.
-2. Note the $$ISSUER_URL$$ (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps.
-
-
-### Step 1: Authorize Your GitLab Group with Gruntwork
-
-To use Gruntwork Pipelines with GitLab, your group needs authorization from Gruntwork:
-
-1. Email your Gruntwork account manager or support@gruntwork.io with:
-
- ```
- GitLab group name(s): $$GITLAB_GROUP_NAME$$ (e.g. acme-io)
- GitLab Issuer URL: $$ISSUER_URL$$ (For most users this is the URL of your GitLab instance e.g. https://gitlab.acme.io. If your instance is not publicly accessible, this should be a separate URL that is publicly accessible per step 0, e.g. https://s3.amazonaws.com/YOUR_BUCKET_NAME/)
- Organization name: $$ORGANIZATION_NAME$$ (e.g. Acme, Inc.)
- ```
-
-2. Wait for confirmation that your group has been authorized.
-
-### Step 2: Install Required Tools
-
-First, you'll need to install [mise](https://mise.jdx.dev/), a powerful environment manager that will help set up the required tools:
-
-1. Install mise by following the [getting started guide](https://mise.jdx.dev/getting-started.html)
-
-2. Activate mise in your shell:
- ```bash
- # For Bash
- echo 'eval "$(~/.local/bin/mise activate bash)"' >> ~/.bashrc
-
- # For Zsh
- echo 'eval "$(~/.local/bin/mise activate zsh)"' >> ~/.zshrc
-
- # For Fish
- echo 'mise activate fish | source' >> ~/.config/fish/config.fish
- ```
-
-3. Install the boilerplate tool, which will generate the project structure:
- ```bash
- # For mise version BEFORE 2025.2.10
- mise plugin add boilerplate https://github.com/gruntwork-io/asdf-boilerplate.git
-
- # For mise version 2025.2.10+
- mise plugin add boilerplate
-
- mise use boilerplate@0.6.0
- ```
-
-4. Verify the installation:
- ```bash
- boilerplate --version
-
- # If that doesn't work, try:
- mise x -- boilerplate --version
-
- # If that still doesn't work, check where boilerplate is installed:
- mise which boilerplate
- ```
-
-### Step 3: Install Gruntwork Pipelines in Your Repository
-
-1. Identify where you want to install Gruntwork Pipelines, for example create a new project/repository in your GitLab group (or use an existing one) named $$REPOSITORY_NAME$$
-
-2. Clone the repository to your local machine if it's not already cloned:
- ```bash
- git clone git@gitlab.com:$$GITLAB_GROUP_NAME$$/$$REPOSITORY_NAME$$.git
- cd $$REPOSITORY_NAME$$
- ```
-3. Create a new branch for your changes:
- ```bash
- git checkout -b gruntwork-pipelines
- ```
-
-4. Download the sample [vars.yaml file](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/blob/main/examples/gitlab-pipelines/vars.yaml) to the root of $$REPOSITORY_NAME$$
-
-4. Edit the `vars.yaml` file to customize it for your environment. If using a custom GitLab instance, update any custom instance variables.
-
-5. `cd` to the root of where you wish to install Gruntwork Pipelines. Run the boilerplate tool to generate your repository structure:
- ```bash
- boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/gitlab-pipelines-infrastructure-live-root/?ref=v3.1.0" --output-folder . --var-file vars.yaml --non-interactive
- ```
-
- If you encounter SSH issues, verify your SSH access to GitHub:
- ```bash
- ssh -T git@github.com
- # or try cloning manually
- git clone git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git
- ```
-
-6. Commit the changes:
- ```bash
- git add .
- git commit -m "[skip ci] Add Gruntwork Pipelines"
- git push origin gruntwork-pipelines
- ```
-
-7. Create a merge request in GitLab and review the changes.
-
-### Step 4: Install AWS OIDC Provider and IAM Roles for Pipelines
-
-1. Navigate to the `_global` folder under each account in your repository and review the Terragrunt files that were created:
- - The GitLab OIDC identity provider in AWS.
-
- :::note
- If using a custom GitLab instance, ensure the `URL` and `audiences` inputs in this configuration are correct.
- :::
-
-   - IAM roles for the account (`root-pipelines-plan` and `root-pipelines-apply`)
-
-2. Apply these configurations to create the required AWS resources:
- ```bash
- cd $$ACCOUNT_NAME$$/_global/
- terragrunt run-all plan
- terragrunt run-all apply
- ```
-
- :::note
-
- In the event you already have an OIDC provider for your SCM in the AWS account you can import the existing one:
-
- ```
- cd _global/$$ACCOUNT_NAME$$/gitlab-pipelines-openid-connect-provider/
- terragrunt import "aws_iam_openid_connect_provider.gitlab" "ARN_OF_EXISTING_OIDC_PROVIDER"
- ```
-
-
- :::
-
-### Step 5: Complete the Setup
-
-1. Return to GitLab and merge the merge request with your changes.
-2. Ensure that `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` are set as a CI/CD variables in your group or project if you haven't already (see the [Machine Users setup guide](/2.0/docs/pipelines/installation/viamachineusers#gitlab) for details).
-3. Test your setup by creating a new branch with some sample infrastructure code and creating a merge request.
-
-## Next Steps
-
-After setting up Pipelines, you can:
-
-- [Deploy your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change)
-- [Learn how to run plan and apply operations](/2.0/docs/pipelines/guides/running-plan-apply)
-- [Extend Pipelines with custom actions](/2.0/docs/pipelines/guides/extending-pipelines)
-
-## Troubleshooting
-
-If you encounter issues during setup:
-
-- Ensure your GitLab CI user has the correct permissions to your group and projects
-- Verify that both `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` are set correctly as CI/CD variables and are *NOT* marked as protected
-- Confirm your GitLab group has been authorized by Gruntwork for Pipelines usage
-
-For further assistance, contact [support@gruntwork.io](mailto:support@gruntwork.io).
diff --git a/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx
new file mode 100644
index 0000000000..363746c737
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/addinggitlabrepo.mdx
@@ -0,0 +1,392 @@
+# Bootstrap Pipelines in a New GitLab Project
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PersistentCheckbox from '/src/components/PersistentCheckbox';
+import CustomizableValue from '/src/components/CustomizableValue';
+
+To configure Gruntwork Pipelines in a new GitLab project, complete the following steps (which are explained in detail below):
+
+1. (If using a self-hosted GitLab instance) Ensure OIDC configuration and JWKS are publicly accessible.
+2. Create an `infrastructure-live` project.
+3. Configure machine user tokens for GitLab access, or ensure that the appropriate machine user tokens are already set up as project- or group-level CI/CD variables.
+4. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments.
+5. Create `.gitlab-ci.yml` to tell your GitLab CI/CD pipeline how to run your pipelines.
+6. Commit and push your changes to your project.
+
+## Ensure OIDC configuration and JWKS are publicly accessible
+
+This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step.
+
+1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. an S3 bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWTs generated by your custom instance.
+2. Note the issuer URL (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps.
+
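+As a rough sketch, mirroring the two discovery documents to a public S3 bucket might look like the following (the instance URL and bucket name are placeholders; GitLab's linked instructions are authoritative):
+
+```bash
+# Fetch the OIDC discovery document and JWKS from your private GitLab instance
+curl -s https://gitlab.example.io/.well-known/openid-configuration -o openid-configuration
+curl -s https://gitlab.example.io/oauth/discovery/keys -o keys.json
+
+# Publish them to a publicly readable S3 bucket at the same relative paths
+aws s3 cp openid-configuration s3://YOUR_BUCKET_NAME/.well-known/openid-configuration
+aws s3 cp keys.json s3://YOUR_BUCKET_NAME/oauth/discovery/keys
+```
+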
+:::note Progress Checklist
+
+
+
+:::
+
+## Creating the infrastructure-live project
+
+Creating an `infrastructure-live` project is fairly straightforward. First, create a new project using the official GitLab documentation for [creating repositories](https://docs.gitlab.com/user/project/repository/). Name the project something like `infrastructure-live` and make it private (or internal).
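+
+If you prefer the command line, a sketch using the `glab` CLI (assuming it is installed and authenticated against your instance; the group name is a placeholder):
+
+```bash
+# Create a private project under your group
+glab repo create acme/infrastructure-live --private
+```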
+
+## Configuring SCM Access
+
+Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself).
+
+For GitLab, you'll need to configure SCM access using [machine users](/2.0/docs/pipelines/installation/viamachineusers) with appropriate Personal Access Tokens (PATs).
+
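+For example, once you have the machine user PATs, you could store them as group-level CI/CD variables with the `glab` CLI (a sketch; the token values and group name are placeholders):
+
+```bash
+glab variable set PIPELINES_GITLAB_TOKEN "glpat-..." --group acme
+glab variable set PIPELINES_GITLAB_READ_TOKEN "glpat-..." --group acme
+```
+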
+:::note Progress Checklist
+
+
+
+:::
+
+## Creating Cloud Resources for Pipelines
+
+To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines.
+
+:::note
+
+If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this.
+
+:::
+
+Clone your `infrastructure-live` project repository to your local machine using [Git](https://docs.gitlab.com/user/project/repository/index.html#clone-a-repository).
+
+:::tip
+
+If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads).
+
+:::
+
+For example:
+
+```bash
+git clone git@gitlab.com:acme/infrastructure-live.git
+cd infrastructure-live
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function.
+
+The easiest way to install Boilerplate is with `mise`.
+
+:::tip
+
+If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html).
+
+:::
+
+```bash
+mise use -g boilerplate@latest
+```
+
+:::tip
+
+If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions.
+
+```bash
+mise ls-remote boilerplate
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+### Cloud-specific bootstrap instructions
+
+The resources that you need provisioned in AWS to start managing resources with Pipelines are:
+
+1. An OpenID Connect (OIDC) provider
+2. An IAM role for Pipelines to assume when running Terragrunt plan commands
+3. An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+You need these resources in every AWS account in which you want Pipelines to manage infrastructure.
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single AWS account
+2. Use Terragrunt to provision these resources in your AWS account
+3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap your `infrastructure-live` repository</h3>
+
+To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitLabGroupName=acme' \
+ --var 'GitLabRepoName=infrastructure-live' \
+ --var 'GitLabInstanceURL=https://gitlab.com' \
+ --var 'AWSAccountID=123456789012' \
+ --var 'AWSRegion=us-east-1' \
+ --var 'StateBucketName=my-state-bucket' \
+ --non-interactive
+```
+
+You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.
+
+```yaml title="vars.yml"
+AccountName: dev
+GitLabGroupName: acme
+GitLabRepoName: infrastructure-live
+GitLabInstanceURL: https://gitlab.com
+AWSAccountID: 123456789012
+AWSRegion: us-east-1
+StateBucketName: my-state-bucket
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+If you're using a self-hosted GitLab instance, you'll want to make sure the issuer is set correctly when calling Boilerplate.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitLabGroupName=acme' \
+ --var 'GitLabRepoName=infrastructure-live' \
+ --var 'GitLabInstanceURL=https://gitlab.com' \
+ --var 'AWSAccountID=123456789012' \
+ --var 'AWSRegion=us-east-1' \
+ --var 'StateBucketName=my-state-bucket' \
+ --var 'Issuer=$$ISSUER_URL$$' \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu):
+
+```bash
+mise install
+```
+
+:::note Progress Checklist
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provisioning the resources</h3>
+
+Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account.
+
+:::tip
+
+Make sure that you're authenticated with AWS locally before proceeding.
+
+You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role.
+
+:::
+
+First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the first AWS account you want to bootstrap.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, apply the changes to your account.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Optional: Bootstrapping additional AWS accounts</h3>
+
+If you have multiple AWS accounts, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process.
+
+For each additional account you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that account.
+
+:::tip
+
+If you are going to bootstrap more AWS accounts, you'll probably want to commit your existing changes before proceeding.
+
+```bash
+git add .
+git commit -m "Add core Pipelines scaffolding [skip ci]"
+```
+
+The `[skip ci]` in the commit message is there in case you push your changes up to your repository in this state, as you don't want to trigger Pipelines yet.
+
+:::
+
+Just like before, you'll use Boilerplate to scaffold out the necessary content for just that account.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/infrastructure-live?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+Again, you can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=prod' \
+ --var 'AWSAccountID=987654321012' \
+ --var 'AWSRegion=us-east-1' \
+ --var 'StateBucketName=my-prod-state-bucket' \
+ --var 'GitLabGroupName=acme' \
+ --var 'GitLabRepoName=infrastructure-live' \
+ --var 'GitLabInstanceURL=https://gitlab.com' \
+ --non-interactive
+```
+
+If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this:
+
+```yaml title="vars.yml"
+AccountName: prod
+AWSAccountID: 987654321012
+AWSRegion: us-east-1
+StateBucketName: my-prod-state-bucket
+GitLabGroupName: acme
+GitLabRepoName: infrastructure-live
+GitLabInstanceURL: https://gitlab.com
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/gitlab/account?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Once you've scaffolded out the additional accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.
+
+:::tip
+
+Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.
+
+:::
+
+For each account you want to bootstrap, you'll need to run the following commands:
+
+```bash
+cd name-of-account/_global/bootstrap
+terragrunt run --all --non-interactive --provider-cache plan
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+## Commit and push your changes
+
+Commit and push your changes to your repository.
+
+:::note
+
+You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow.
+
+:::
+
+```bash
+git add .
+git commit -m "Add Pipelines GitLab CI workflow [skip ci]"
+git push
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+🚀 You've successfully added Gruntwork Pipelines to your new repository!
+
+## Next steps
+
+You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) to begin deploying changes.
diff --git a/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md b/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md
new file mode 100644
index 0000000000..14a30f97ff
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/addingnewgitlabrepo.md
@@ -0,0 +1,547 @@
+import CustomizableValue from '/src/components/CustomizableValue';
+
+# Creating a New GitLab Project with Pipelines
+
+This guide walks you through the process of setting up a new GitLab Project with the Gruntwork Platform. By the end, you'll have a fully configured GitLab CI/CD pipeline that can create new AWS accounts and deploy infrastructure changes automatically.
+
+:::info
+To use Gruntwork Pipelines in an **existing** GitLab repository, see this [guide](/2.0/docs/pipelines/installation/addinggitlabrepo).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+- Basic familiarity with Git, GitLab, and infrastructure as code concepts
+- Completed the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone)
+- Programmatic access to the AWS accounts created in the [AWS Landing Zone setup](/2.0/docs/accountfactory/prerequisites/awslandingzone)
+- Completed the [Pipelines Auth setup for GitLab](/2.0/docs/pipelines/installation/viamachineusers#gitlab) and set up a machine user with the appropriate PATs
+- Local access to Gruntwork's GitHub repositories, specifically the [architecture catalog](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/)
+
+
+## Additional setup for custom GitLab instances only
+
+### Fork the Pipelines workflow project
+
+You must [fork](https://docs.gitlab.com/user/project/repository/forking_workflow/#create-a-fork) Gruntwork's public [Pipelines workflow project](https://gitlab.com/gruntwork-io/pipelines-workflows) into your own GitLab instance.
+This is necessary because Gruntwork Pipelines uses [GitLab CI/CD components](/2.0/docs/pipelines/architecture/ci-workflows), and GitLab requires components to reside within the [same GitLab instance as the project referencing them](https://docs.gitlab.com/ci/components/#use-a-component).
+
+When creating the fork, we recommend configuring it as a public mirror of the original Gruntwork project and ensuring that tags are included.
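+
+One manual way to seed such a fork, including tags, is a mirror clone and push (a sketch; the destination URL is a placeholder for a project on your instance):
+
+```bash
+# Mirror-clone the public workflows project, then push everything (branches and tags) to your instance
+git clone --mirror https://gitlab.com/gruntwork-io/pipelines-workflows.git
+cd pipelines-workflows.git
+git push --mirror git@gitlab.example.io:acme/pipelines-workflows.git
+```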
+
+### Ensure OIDC configuration and JWKS are publicly accessible
+
+This step only applies if you are using a self-hosted GitLab instance that is not accessible from the public internet. If you are using GitLab.com or a self-hosted instance that is publicly accessible, you can skip this step.
+
+1. [Follow GitLab's instructions](https://docs.gitlab.com/ci/cloud_services/aws/#configure-a-non-public-gitlab-instance) for hosting your OIDC configuration and JWKS in a public location (e.g. an S3 bucket). This is necessary for both Gruntwork and the AWS OIDC provider to access the GitLab OIDC configuration and JWKS when authenticating JWTs generated by your custom instance.
+2. Note the issuer URL (stored as `ci_id_tokens_issuer_url` in your `gitlab.rb` file per GitLab's instructions) generated above for reuse in the next steps.
+
+
+Setting up the new project involves the following steps:
+
+1. Create a new GitLab project for your `infrastructure-live-root` repository.
+1. Install dependencies.
+1. Configure the variables required to run the infrastructure-live-root boilerplate template.
+1. Create your `infrastructure-live-root` repository contents using Gruntwork's architecture-catalog template.
+1. Apply the account baselines to your AWS accounts.
+
+
+## Create a new infrastructure-live-root
+
+### Authorize Your GitLab Group with Gruntwork
+
+To use Gruntwork Pipelines with GitLab, your group needs authorization from Gruntwork. Email your Gruntwork account manager or support@gruntwork.io with:
+
+ ```
+ GitLab group name(s): $$GITLAB_GROUP_NAME$$ (e.g. acme-io)
+ GitLab Issuer URL: $$ISSUER_URL$$ (For most users this is the URL of your GitLab instance e.g. https://gitlab.acme.io, if your instance is not publicly accessible, this should be a separate URL that is publicly accessible per step 0, e.g. https://s3.amazonaws.com/YOUR_BUCKET_NAME/)
+ Organization name: $$ORGANIZATION_NAME$$ (e.g. Acme, Inc.)
+ ```
+
+Continue with the rest of the guide while you await confirmation that your group has been authorized.
+
+### Create a new GitLab project
+
+1. Navigate to the group.
+1. Click the **New Project** button.
+1. Enter a name for the project, e.g. `infrastructure-live-root`.
+1. Click **Create Project**.
+1. Clone the project to your local machine.
+1. Navigate to the project directory.
+1. Create a new branch `bootstrap-repository`.
+
+### Install dependencies
+
+1. Install [mise](https://mise.jdx.dev/getting-started.html) on your machine.
+1. Activate mise in your shell:
+
+ ```bash
+ # For Bash
+ echo 'eval "$(~/.local/bin/mise activate bash)"' >> ~/.bashrc
+
+ # For Zsh
+ echo 'eval "$(~/.local/bin/mise activate zsh)"' >> ~/.zshrc
+
+ # For Fish
+ echo 'mise activate fish | source' >> ~/.config/fish/config.fish
+ ```
+
+1. Add the following to a `.mise.toml` file in the root of your project:
+
+ ```toml title=".mise.toml"
+ [tools]
+ boilerplate = "0.8.1"
+ opentofu = "1.10.0"
+ terragrunt = "0.81.6"
+ awscli = "latest"
+ ```
+
+1. Run `mise install`.
+
+
+### Bootstrap the repository
+
+Gruntwork provides a boilerplate [template](https://github.com/gruntwork-io/terraform-aws-architecture-catalog/tree/main/templates/devops-foundations-infrastructure-live-root) that incorporates best practices while allowing for customization. The template is designed to scaffold best-practices Terragrunt configurations. It includes patterns for module defaults, global variables, and account baselines. Additionally, it integrates Gruntwork Pipelines.
+
+#### Configure the variables required to run the boilerplate template
+
+Copy the content below to a `vars.yaml` file in the root of your project and update the `$$...$$` placeholder values with your own.
+
+```yaml title="vars.yaml"
+SCMProvider: GitLab
+
+# The GitLab group to use for the infrastructure repositories. This should include any additional sub-groups in the name
+# Example: acme/prod
+SCMProviderGroup: $$GITLAB_GROUP_NAME$$
+
+# The GitLab project to use for the infrastructure-live repository.
+SCMProviderRepo: infrastructure-live-root
+
+# The base URL of your GitLab group repos. E.g., gitlab.com/
+RepoBaseUrl: $$GITLAB_GROUP_REPO_BASE_URL$$
+
+# The name of the branch to deploy to.
+# Example: main
+DeployBranchName: $$DEPLOY_BRANCH_NAME$$
+
+# The AWS account ID for the management account
+# Example: "123456789012"
+AwsManagementAccountId: $$AWS_MANAGEMENT_ACCOUNT_ID$$
+
+# The AWS account ID for the security account
+# Example: "123456789013"
+AwsSecurityAccountId: $$AWS_SECURITY_ACCOUNT_ID$$
+
+# The AWS account ID for the logs account
+# Example: "123456789014"
+AwsLogsAccountId: $$AWS_LOGS_ACCOUNT_ID$$
+
+# The AWS account ID for the shared account
+# Example: "123456789015"
+AwsSharedAccountId: $$AWS_SHARED_ACCOUNT_ID$$
+
+# The AWS account Email for the logs account
+# Example: logs@acme.com
+AwsLogsAccountEmail: $$AWS_LOGS_ACCOUNT_EMAIL$$
+
+# The AWS account Email for the management account
+# Example: management@acme.com
+AwsManagementAccountEmail: $$AWS_MANAGEMENT_ACCOUNT_EMAIL$$
+
+# The AWS account Email for the security account
+# Example: security@acme.com
+AwsSecurityAccountEmail: $$AWS_SECURITY_ACCOUNT_EMAIL$$
+
+# The AWS account Email for the shared account
+# Example: shared@acme.com
+AwsSharedAccountEmail: $$AWS_SHARED_ACCOUNT_EMAIL$$
+
+# The name prefix to use for creating resources e.g S3 bucket for OpenTofu state files
+# Example: acme
+OrgNamePrefix: $$ORG_NAME_PREFIX$$
+
+# The default region for AWS Resources
+# Example: us-east-1
+DefaultRegion: $$DEFAULT_REGION$$
+
+################################################################################
+# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED.
+################################################################################
+
+# List of the git repositories to populate for the catalog
+# CatalogRepositories:
+# - github.com/gruntwork-io/terraform-aws-service-catalog
+
+# The AWS partition to use. Options: aws, aws-us-gov
+# AWSPartition: aws
+
+# The name of the IAM role to use for the plan job.
+# PlanIAMRoleName: root-pipelines-plan
+
+# The name of the IAM role to use for the apply job.
+# ApplyIAMRoleName: root-pipelines-apply
+
+# The default tags to apply to all resources.
+# DefaultTags:
+# "{{ .OrgNamePrefix }}:Team": "DevOps"
+
+# The version for terraform-aws-security module to use for OIDC provider and roles provisioning
+# SecurityModulesVersion: v0.75.18
+
+# The URL of the custom SCM provider instance. Set this if you are using a custom instance of GitLab.
+# CustomSCMProviderInstanceURL: https://gitlab.example.io
+
+# The relative path from the host server to the custom pipelines workflow repository. Set this if you are using a custom/forked instance of the pipelines workflow.
+# CustomWorkflowHostRelativePath: pipelines-workflows
+```
+
+#### Generate the repository contents
+
+1. Run the following command, from the root of your project, to generate the `infrastructure-live-root` repository contents:
+
+
+ ```bash
+ boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-live-root/?ref=main" --output-folder . --var-file vars.yaml --non-interactive
+ ```
+
+ This command adds all code required to set up your `infrastructure-live-root` repository.
+1. Remove the boilerplate dependency from the `.mise.toml` file. It is no longer needed.
+
+1. Commit your local changes and push them to the `bootstrap-repository` branch.
+
+ ```bash
+ git add .
+ git commit -m "Bootstrap infrastructure-live-root repository initial commit [skip ci]"
+ git push origin bootstrap-repository
+ ```
+
+    The `[skip ci]` tag skips the CI/CD process for now; you will manually apply the infrastructure baselines to your AWS accounts in a later step.
+
+1. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand what will be applied to your AWS accounts. The generated files fall under the following categories:
+
+ - GitLab Pipelines workflow file
+ - Gruntwork Pipelines configuration files
+ - Module defaults files for infrastructure code
+ - Account baselines and GitLab OIDC module scaffolding files for your core AWS accounts: management, security, logs and shared.
+
+### Apply the account baselines to your AWS accounts
+
+You will manually `terragrunt apply` the generated infrastructure baselines to get your accounts bootstrapped **before** merging this content into your main branch.
+
+:::tip
+You can use the AWS SSO portal to obtain the temporary AWS credentials needed for the subsequent steps:
+
+1. Sign in to the portal page and select an account to view the roles available to your SSO user.
+1. Navigate to the "Access keys" tab adjacent to the "AWSAdministratorAccess" role.
+1. Copy the "AWS environment variables" provided and paste them into your terminal.
+:::
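+
+The copied "AWS environment variables" are plain shell exports; they look roughly like this (values truncated):
+
+```bash
+export AWS_ACCESS_KEY_ID="ASIA..."
+export AWS_SECRET_ACCESS_KEY="..."
+export AWS_SESSION_TOKEN="..."
+```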
+
+
+1. [ ] Apply infrastructure changes in the **management** account
+
+ 1. - [ ] Obtain AWS CLI Administrator credentials for the management account
+
+ 1. - [ ] Navigate to the management account folder
+
+ ```bash
+ cd management/
+ ```
+
+ 1. - [ ] Using your credentials, run `terragrunt plan`.
+
+ ```bash
+ terragrunt run --all plan --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After the plan succeeds, apply the changes:
+
+ ```bash
+ terragrunt run --all apply --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. The lock files will be committed in the final step of the setup. e.g.
+
+ ```bash
+ terragrunt run --all providers -- lock -platform=darwin_amd64 -platform=linux_amd64
+ ```
+
+ 1. - [ ] Update Permissions for Account Factory Portfolio
+
+ The account factory pipeline _will fail_ until you grant the pipelines roles (`root-pipelines-plan` and `root-pipelines-apply`) access to the portfolio. This step **must be done after** you provision the pipelines roles in the management account (where control tower is set up).
+
+    Access to the portfolio is separate from IAM access; it **must** be granted in the Service Catalog console.
+
+ #### **Steps to grant access**
+
+ To grant access to the Account Factory Portfolio, you **must** be an individual with Service Catalog administrative permissions.
+
+ 1. Log into the management AWS account
+ 1. Go into the Service Catalog console
+    1. Ensure you are in your default region (the Control Tower region)
+ 1. Select the **Portfolios** option in **Administration** from the left side navigation panel
+ 1. Click on the portfolio named **AWS Control Tower Account Factory Portfolio**
+ 1. Select the **Access** tab
+ 1. Click the **Grant access** button
+ 1. In the **Access type** section, leave the default value of **IAM Principal**
+ 1. Select the **Roles** tab in the lower section
+    1. Enter `root-pipelines` into the search bar; there should be two results (`root-pipelines-plan` and `root-pipelines-apply`). Click the checkbox to the left of each role name.
+ 1. Click the **Grant access** button in the lower right hand corner
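+
+    If you prefer to script this grant, a hedged sketch using the AWS CLI (the portfolio ID and account ID below are placeholders; repeat the command for both roles):
+
+    ```bash
+    aws servicecatalog associate-principal-with-portfolio \
+      --portfolio-id port-xxxxxxxxxxxx \
+      --principal-type IAM \
+      --principal-arn arn:aws:iam::123456789012:role/root-pipelines-plan
+    ```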
+
+ 1. - [ ] Increase Account Quota Limit (OPTIONAL)
+
+ Note that DevOps Foundations makes it very convenient, and therefore likely, that you will encounter one of the soft limits imposed by AWS on the number of accounts you can create.
+
+ You may need to request a limit increase for the number of accounts you can create in the management account, as the default is currently 10 accounts.
+
+ To request an increase to this limit, search for "Organizations" in the AWS management console [here](https://console.aws.amazon.com/servicequotas/home/dashboard) and request a limit increase to a value that makes sense for your organization.
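+
+    If you prefer the CLI, you can inspect and request the Organizations account quota with the Service Quotas API. A sketch (the quota code is a placeholder; find the real one with the list command first):
+
+    ```bash
+    # List Organizations quotas to find the code for the account limit
+    aws service-quotas list-service-quotas --service-code organizations
+
+    # Request an increase for that quota (code and value are placeholders)
+    aws service-quotas request-service-quota-increase \
+      --service-code organizations \
+      --quota-code L-XXXXXXXX \
+      --desired-value 50
+    ```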
+
+1. - [ ] Apply infrastructure changes in the **logs** account
+
+ 1. - [ ] Obtain AWS CLI Administrator credentials for the logs account
+ 1. - [ ] Navigate to the logs account folder
+
+ ```bash
+ cd ../logs/
+ ```
+
+ 1. - [ ] Using your credentials, run `terragrunt plan`.
+
+ ```bash
+ terragrunt run --all plan --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After the plan succeeds, apply the changes:
+
+ ```bash
+ terragrunt run --all apply --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g.
+
+ ```bash
+ terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64
+ ```
+
+1. - [ ] Apply infrastructure changes in the **security** account
+
+ 1. - [ ] Obtain AWS CLI Administrator credentials for the security account
+ 1. - [ ] Navigate to the security account folder
+
+ ```bash
+ cd ../security/
+ ```
+
+ 1. - [ ] Using your credentials, run `terragrunt plan`.
+
+ ```bash
+ terragrunt run --all plan --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After the plan succeeds, apply the changes:
+
+ ```bash
+ terragrunt run --all apply --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g.
+
+ ```bash
+ terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64
+ ```
+
+1. - [ ] Apply infrastructure changes in the **shared** account
+
+ 1. - [ ] Obtain AWS CLI Administrator credentials for the shared account. You may need to grant your user access to the `AWSAdministratorAccess` permission set in the shared account from the management account's Identity Center Admin console.
+ 1. - [ ] Using your credentials, create a service role
+
+ ```bash
+ aws iam create-service-linked-role --aws-service-name autoscaling.amazonaws.com
+ ```
+
+ 1. - [ ] Navigate to the shared account folder
+
+ ```bash
+ cd ../shared/
+ ```
+
+ 1. - [ ] Using your credentials, run `terragrunt plan`.
+
+ ```bash
+ terragrunt run --all plan --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After the plan succeeds, apply the changes:
+
+ ```bash
+ terragrunt run --all apply --terragrunt-non-interactive
+ ```
+
+ 1. - [ ] After applying the changes, make sure to lock providers in your `.terraform.lock.hcl` files. e.g.
+
+ ```bash
+ terragrunt run --all providers lock -platform=darwin_amd64 -platform=linux_amd64
+ ```
+
+1. - [ ] Commit your local changes and push them to the `bootstrap-repository` branch.
+
+ ```bash
+ cd ..
+ git add .
+ git commit -m "Bootstrap infrastructure-live-root repository final commit [skip ci]"
+ git push origin bootstrap-repository
+ ```
+
+1. - [ ] Merge the open merge request. **Ensure [skip ci] is present in the commit message.**
+
+
+## Create a new infrastructure-live-access-control (optional)
+
+### Create a new GitLab project
+
+1. Navigate to the group.
+1. Click the **New Project** button.
+1. Enter the name for the project as `infrastructure-live-access-control`.
+1. Click **Create Project**.
+1. Clone the project to your local machine.
+1. Navigate to the project directory.
+1. Create a new branch `bootstrap-repository`.
+
+### Install dependencies
+
+Run `mise install boilerplate@0.8.1` to install the boilerplate tool.
+
+### Bootstrap the repository
+
+#### Configure the variables required to run the boilerplate template
+
+Copy the content below to a `vars.yaml` file in the root of your project and update the customizable values as needed.
+
+```yaml title="vars.yaml"
+SCMProvider: GitLab
+
+# The GitLab group to use for the infrastructure repositories. This should include any additional sub-groups in the name
+# Example: acme/prod
+SCMProviderGroup: $$GITLAB_GROUP_NAME$$
+
+# The GitLab project to use for the infrastructure-live repository.
+SCMProviderRepo: infrastructure-live-access-control
+
+# The name of the branch to deploy to.
+# Example: main
+DeployBranchName: $$DEPLOY_BRANCH_NAME$$
+
+# The name prefix to use for creating resources e.g S3 bucket for OpenTofu state files
+# Example: acme
+OrgNamePrefix: $$ORG_NAME_PREFIX$$
+
+# The default region for AWS Resources
+# Example: us-east-1
+DefaultRegion: $$DEFAULT_REGION$$
+
+################################################################################
+# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED.
+################################################################################
+
+# The AWS partition to use.
+# AWSPartition: aws
+```
+
+#### Generate the repository contents
+
+1. Run the following command, from the root of your project, to generate the `infrastructure-live-access-control` repository contents:
+
+
+ ```bash
+ boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-live-access-control/?ref=main" --output-folder . --var-file vars.yaml --non-interactive
+ ```
+
+ This command adds all code required to set up your `infrastructure-live-access-control` repository. The generated files fall under the following categories:
+
+ - GitLab Pipelines workflow file
+ - Gruntwork Pipelines configuration files
+ - Module defaults files for GitLab OIDC roles and policies
+
+
+2. Commit your local changes and push them to the `bootstrap-repository` branch.
+
+ ```bash
+ git add .
+ git commit -m "Bootstrap infrastructure-live-access-control repository [skip ci]"
+ git push origin bootstrap-repository
+ ```
+
+    The CI/CD process is skipped because there is no infrastructure to apply; the repository simply contains the GitLab OIDC role module defaults to enable GitLab OIDC authentication from repositories other than `infrastructure-live-root`.
+
+3. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand the GitLab OIDC role module defaults.
+4. Merge the open merge request. **Ensure [skip ci] is present in the commit message.**
+
+## Create a new infrastructure-catalog (optional)
+
+The `infrastructure-catalog` repository is a collection of modules that can be used to build your infrastructure. It is a great way to share modules with your team and across your organization. Learn more about the [Developer Self-Service](/2.0/docs/overview/concepts/developer-self-service) concept.
+
+### Create a new GitLab project
+
+1. Navigate to the group.
+1. Click the **New Project** button.
+1. Enter the name for the project as `infrastructure-catalog`.
+1. Click **Create Project**.
+1. Clone the project to your local machine.
+1. Navigate to the project directory.
+1. Create a new branch `bootstrap-repository`.
+
+### Install dependencies
+
+Run `mise install boilerplate@0.8.1` to install the boilerplate tool.
+
+### Bootstrap the repository
+
+#### Configure the variables required to run the boilerplate template
+
+Copy the content below to a `vars.yaml` file in the root of your project and update the customizable values as needed.
+
+```yaml title="vars.yaml"
+# The name of the repository to use for the catalog.
+InfraModulesRepoName: infrastructure-catalog
+
+# The version of the Gruntwork Service Catalog to use. https://github.com/gruntwork-io/terraform-aws-service-catalog
+ServiceCatalogVersion: v0.111.2
+
+# The version of the Gruntwork VPC module to use. https://github.com/gruntwork-io/terraform-aws-vpc
+VpcVersion: v0.26.22
+
+# The default region for AWS Resources
+# Example: us-east-1
+DefaultRegion: $$DEFAULT_REGION$$
+
+################################################################################
+# OPTIONAL VARIABLES WITH THEIR DEFAULT VALUES. UNCOMMENT AND MODIFY IF NEEDED.
+################################################################################
+
+# The base URL of the Organization to use for the catalog.
+# If you are using Gruntwork's RepoCopier tool, this should be the base URL of the repository you are copying from.
+# RepoBaseUrl: github.com/gruntwork-io
+
+# The name prefix to use for the Gruntwork RepoCopier copied repositories.
+# Example: gruntwork-io-
+# GWCopiedReposNamePrefix:
+```
+
+
+#### Generate the repository contents
+
+1. Run the following command, from the root of your project, to generate the `infrastructure-catalog` repository contents:
+
+
+ ```bash
+ boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/devops-foundations-infrastructure-modules/?ref=main" --output-folder . --var-file vars.yaml --non-interactive
+ ```
+
+    This command adds the code required to set up your `infrastructure-catalog` repository. The generated files include ready-to-use example modules for your infrastructure.
+
+1. Commit your local changes and push them to the `bootstrap-repository` branch.
+
+ ```bash
+ git add .
+ git commit -m "Bootstrap infrastructure-catalog repository"
+ git push origin bootstrap-repository
+ ```
+
+1. Create a new merge request for the `bootstrap-repository` branch. Review the changes to understand the example Service Catalog modules.
+1. Merge the open merge request.
diff --git a/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx
new file mode 100644
index 0000000000..1db7c8a970
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/addingnewrepo.mdx
@@ -0,0 +1,969 @@
+# Bootstrap Pipelines in a New GitHub Repository
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PersistentCheckbox from '/src/components/PersistentCheckbox';
+
+To configure Gruntwork Pipelines in a new GitHub repository, complete the following steps (which are explained in detail below):
+
+1. Create an `infrastructure-live` repository.
+2. Configure the Gruntwork.io GitHub App to authorize your `infrastructure-live` repository, or ensure that the appropriate machine user tokens are set up as repository or organization secrets.
+3. Create `.gruntwork` HCL configurations to tell Pipelines how to authenticate in your environments.
+4. Create `.github/workflows/pipelines.yml` to tell your GitHub Actions workflow how to run your pipelines.
+5. Commit and push your changes to your repository.
+
+## Creating the infrastructure-live repository
+
+Creating an `infrastructure-live` repository is fairly straightforward. First, create a new repository using the official GitHub documentation for [creating repositories](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-new-repository). Name the repository something like `infrastructure-live` and make it private (or internal).
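+
+If you prefer the command line, a sketch using the GitHub CLI (assuming it is installed and authenticated; the organization name is a placeholder):
+
+```bash
+# Create a private repository in your organization
+gh repo create acme/infrastructure-live --private
+```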
+
+## Configuring SCM Access
+
+Pipelines needs the ability to interact with Source Control Management (SCM) platforms to fetch resources (e.g. IaC code, reusable CI/CD code and the Pipelines binary itself).
+
+There are two ways to configure SCM access for Pipelines:
+
+1. Using the [Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp#configuration) (recommended for most GitHub users).
+2. Using a [machine user](/2.0/docs/pipelines/installation/viamachineusers) (recommended for GitHub users who cannot use the GitHub App).
+
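+If you go the machine user route, you can store the machine user's token as a repository secret with the GitHub CLI. A sketch (the secret name is a placeholder; use the names from the machine users guide):
+
+```bash
+# Prompts for the token value, or pass it with --body
+gh secret set PIPELINES_READ_TOKEN --repo acme/infrastructure-live
+```
+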
+:::note Progress Checklist
+
+
+
+:::
+
+## Creating Cloud Resources for Pipelines
+
+To start using Pipelines, you'll need to ensure that requisite cloud resources are provisioned in your cloud provider(s) to start managing your infrastructure with Pipelines.
+
+:::note
+
+If you are using the [Gruntwork Account Factory](/2.0/docs/accountfactory/architecture), this will be done automatically during onboarding and in the process of [vending every new AWS account](/2.0/docs/accountfactory/guides/vend-aws-account), so you don't need to worry about this.
+
+:::
+
+Clone your `infrastructure-live` repository to your local machine using [Git](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
+
+:::tip
+
+If you don't have Git installed, you can install it by following the official guide for [Git installation](https://git-scm.com/downloads).
+
+:::
+
+For example:
+
+```bash
+git clone git@github.com:acme/infrastructure-live.git
+cd infrastructure-live
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary IaC code to provision the infrastructure necessary for Pipelines to function.
+
+The easiest way to install Boilerplate is with `mise`.
+
+:::tip
+
+If you don't have `mise` installed, you can install it by following the official guide for [mise installation](https://mise.jdx.dev/getting-started.html).
+
+:::
+
+```bash
+mise use -g boilerplate@latest
+```
+
+:::tip
+
+If you'd rather install a specific version of Boilerplate, you can use the `ls-remote` command to list the available versions.
+
+```bash
+mise ls-remote boilerplate
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+### Cloud-specific bootstrap instructions
+
+
+
+
+The resources that you need provisioned in AWS to start managing resources with Pipelines are:
+
+1. An OpenID Connect (OIDC) provider
+2. An IAM role for Pipelines to assume when running Terragrunt plan commands
+3. An IAM role for Pipelines to assume when running Terragrunt apply commands
+
+You need these resources in every AWS account in which you want Pipelines to manage infrastructure.
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Set up the Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single AWS account
+2. Use Terragrunt to provision these resources in your AWS account
+3. (Optionally) Bootstrap additional AWS accounts until all your AWS accounts are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap your `infrastructure-live` repository</h3>
+
+To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitHubOrgName=acme' \
+ --var 'GitHubRepoName=infrastructure-live' \
+ --var 'AWSAccountID=123456789012' \
+ --var 'AWSRegion=us-east-1' \
+ --var 'StateBucketName=my-state-bucket' \
+ --non-interactive
+```
+
+You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.
+
+```yaml title="vars.yml"
+AccountName: dev
+GitHubOrgName: acme
+GitHubRepoName: infrastructure-live
+AWSAccountID: 123456789012
+AWSRegion: us-east-1
+StateBucketName: my-state-bucket
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu):
+
+```bash
+mise install
+```
+
+:::note Progress Checklist
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provisioning the resources</h3>
+
+Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your AWS account.
+
+:::tip
+
+Make sure that you're authenticated with AWS locally before proceeding.
+
+You can follow the documentation [here](https://search.opentofu.org/provider/hashicorp/aws/latest#authentication-and-configuration) to authenticate with the AWS provider. You are advised to choose an authentication method that doesn't require any hard-coded credentials, like assuming an IAM role.
+
+:::
+
+First, make sure that everything is set up correctly by running a plan in the `bootstrap` directory in `name-of-account/_global` where `name-of-account` is the name of the first AWS account you want to bootstrap.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the AWS provider on every run by leveraging the [Terragrunt Provider Cache Server](https://terragrunt.gruntwork.io/docs/features/provider-cache-server/).
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, apply the changes to your account.
+
+```bash title="name-of-account/_global/bootstrap"
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Optional: Bootstrapping additional AWS accounts</h3>
+
+If you have multiple AWS accounts, and you want to bootstrap them as well, you can do so by following a similar, but slightly condensed process.
+
+For each additional account you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that account.
+
+:::tip
+
+If you are going to bootstrap more AWS accounts, you'll probably want to commit your existing changes before proceeding.
+
+```bash
+git add .
+git commit -m "Add core Pipelines scaffolding [skip ci]"
+```
+
+The `[skip ci]` in the commit message is there in case you push your changes up to your repository in this state, as you don't want to trigger Pipelines yet.
+
+:::
+
+Just like before, you'll use Boilerplate to scaffold out the necessary content for just that account.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+Again, you can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=prod' \
+ --var 'AWSAccountID=987654321012' \
+ --var 'AWSRegion=us-east-1' \
+ --var 'StateBucketName=my-prod-state-bucket' \
+ --var 'GitHubOrgName=acme' \
+ --var 'GitHubRepoName=infrastructure-live' \
+ --non-interactive
+```
+
+If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this:
+
+```yaml title="vars.yml"
+AccountName: prod
+AWSAccountID: 987654321012
+AWSRegion: us-east-1
+StateBucketName: my-prod-state-bucket
+GitHubOrgName: acme
+GitHubRepoName: infrastructure-live
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/aws/github/account?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Once you've scaffolded out the additional accounts you want to bootstrap, you can use Terragrunt to provision the resources in each of these accounts.
+
+:::tip
+
+Make sure that you authenticate to each AWS account you are bootstrapping using AWS credentials for that account before you attempt to provision resources in it.
+
+:::
+
+For each account you want to bootstrap, you'll need to run the following commands:
+
+```bash
+cd name-of-account/_global/bootstrap
+terragrunt run --all --non-interactive --provider-cache plan
+terragrunt run --all --non-interactive --provider-cache apply
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+
+
+
+The resources that you need provisioned in Azure to start managing resources with Pipelines are:
+
+1. An Azure Resource Group for OpenTofu state resources
+ 1. An Azure Storage Account in that resource group for OpenTofu state storage
+ 1. An Azure Storage Container in that storage account for OpenTofu state storage
+2. An Entra ID Application to use for plans
+ 1. A Flexible Federated Identity Credential for the application to authenticate with your repository on any branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+ 2. A role assignment for the service principal to access the Azure Storage Account
+3. An Entra ID Application to use for applies
+ 1. A Federated Identity Credential for the application to authenticate with your repository on the deploy branch
+ 2. A Service Principal for the application to be used in role assignments
+ 1. A role assignment for the service principal to access the Azure subscription
+
+:::tip Don't Panic!
+
+This may seem like a lot to set up, but the content you need to add to your `infrastructure-live` repository is minimal. The majority of the work will be pulled from a reusable catalog that you'll reference in your `infrastructure-live` repository.
+
+If you want to peruse the catalog that's used in the bootstrap process, you can take a look at the [terragrunt-scale-catalog](https://github.com/gruntwork-io/terragrunt-scale-catalog) repository.
+
+:::
+
+The process that we'll follow to get these resources ready for Pipelines is:
+
+1. Set up these bootstrap resources by creating some Terragrunt configurations in your `infrastructure-live` repository for bootstrapping Pipelines in a single Azure subscription
+2. Use Terragrunt to provision these resources in your Azure subscription
+3. Finalize the Terragrunt configurations using the bootstrap resources we just provisioned
+4. Pull the bootstrap resources into state, now that we have configured a remote state backend
+5. (Optionally) Bootstrap additional Azure subscriptions until all your Azure subscriptions are ready for Pipelines
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrap your `infrastructure-live` repository</h3>
+
+To bootstrap your `infrastructure-live` repository, we'll use Boilerplate to scaffold it with the necessary content for Pipelines to function.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+You can just reply `y` to all the prompts to include dependencies, and accept defaults unless you want to customize something.
+
+Alternatively, you could run Boilerplate non-interactively by passing the `--non-interactive` flag. You'll need to supply the relevant values for required variables in that case.
+
+e.g.
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=dev' \
+ --var 'GitHubOrgName=acme' \
+ --var 'GitHubRepoName=infrastructure-live' \
+ --var 'SubscriptionName=dev' \
+ --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \
+ --var 'AzureSubscriptionID=11111111-1111-1111-1111-111111111111' \
+ --var 'AzureLocation=East US' \
+ --var 'StateResourceGroupName=pipelines-rg' \
+ --var 'StateStorageAccountName=mysa' \
+ --var 'StateStorageContainerName=tfstate' \
+ --non-interactive
+```
+
+You can also choose to store these values in a YAML file and pass it to Boilerplate using the `--var-file` flag.
+
+```yaml title="vars.yml"
+AccountName: dev
+GitHubOrgName: acme
+GitHubRepoName: infrastructure-live
+AzureTenantID: 00000000-0000-0000-0000-000000000000
+AzureSubscriptionID: 11111111-1111-1111-1111-111111111111
+AzureLocation: East US
+StateResourceGroupName: pipelines-rg
+StateStorageAccountName: my-storage-account
+StateStorageContainerName: tfstate
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/infrastructure-live?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
+
+:::
+
+:::note Progress Checklist
+
+:::
+
+Next, install Terragrunt and OpenTofu locally (the `.mise.toml` file in the root of the repository after scaffolding should already be set to the versions you want for Terragrunt and OpenTofu):
+
+```bash
+mise install
+```
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Provisioning the resources</h3>
+
+Once you've set up the Terragrunt configurations, you can use Terragrunt to provision the resources in your Azure subscription.
+
+If you haven't already, you'll want to authenticate to Azure using the `az` CLI.
+
+```bash
+az login
+```
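+
+Before proceeding, it can be worth verifying that you're logged in to the tenant and subscription you expect; for example:
+
+```bash
+az account show --output table
+```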
+
+:::note Progress Checklist
+
+
+
+:::
+
+
+To dynamically configure the Azure provider with a given tenant ID and subscription ID, ensure that you export the following environment variables if you haven't already set the values via the `az` CLI:
+
+- `ARM_TENANT_ID`
+- `ARM_SUBSCRIPTION_ID`
+
+For example:
+
+```bash
+export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export ARM_SUBSCRIPTION_ID="11111111-1111-1111-1111-111111111111"
+```
+
+:::note Progress Checklist
+
+
+
+:::
+
+First, make sure that everything is set up correctly by running a plan in the subscription directory.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache plan
+```
+
+:::tip
+
+We're using the `--provider-cache` flag here to ensure that we don't re-download the Azure provider on every run to speed up the process.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Next, apply the changes to your subscription.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply
+```
+
+:::tip
+
+We're adding the `--no-stack-generate` flag here, as Terragrunt will already have the requisite stack configurations generated, and we don't want to accidentally overwrite any configurations while we have state stored locally before we pull them into remote state.
+
+:::
+
+:::note Progress Checklist
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Finalizing Terragrunt configurations</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can finalize the Terragrunt configurations using the bootstrap resources we just provisioned.
+
+First, edit the `root.hcl` file in the root of your `infrastructure-live` repository to leverage the storage account we just provisioned.
+
+```hcl title="root.hcl"
+locals {
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl"))
+
+ state_resource_group_name = local.sub_hcl.locals.state_resource_group_name
+ state_storage_account_name = local.sub_hcl.locals.state_storage_account_name
+ state_storage_container_name = local.sub_hcl.locals.state_storage_container_name
+}
+
+# FIXME: Uncomment the code below when you've successfully bootstrapped Pipelines state.
+#
+# remote_state {
+# backend = "azurerm"
+# generate = {
+# path = "backend.tf"
+# if_exists = "overwrite"
+# }
+# config = {
+# resource_group_name = local.state_resource_group_name
+# storage_account_name = local.state_storage_account_name
+# container_name = local.state_storage_container_name
+# key = "${path_relative_to_include()}/tofu.tfstate"
+# }
+# }
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+:::
+
+Next, finalize the `.gruntwork/environment-.hcl` file in the root of your `infrastructure-live` repository to reference the IDs for the applications we just provisioned.
+
+```hcl title=".gruntwork/environment-.hcl"
+environment "dev" {
+ filter {
+ paths = ["dev/*"]
+ }
+
+ authentication {
+ azure_oidc {
+ tenant_id = "00000000-0000-0000-0000-000000000000"
+ subscription_id = "11111111-1111-1111-1111-111111111111"
+
+ plan_client_id = "" # FIXME: Fill in the client ID for the plan application after bootstrapping
+ apply_client_id = "" # FIXME: Fill in the client ID for the apply application after bootstrapping
+ }
+ }
+}
+```
+
+You can find the values for `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `name-of-subscription/bootstrap` directory.
+
+```bash
+terragrunt stack output
+```
+
+The relevant values to extract from the stack output are the following:
+
+```hcl
+bootstrap = {
+ apply_app = {
+ client_id = "33333333-3333-3333-3333-333333333333"
+ }
+ plan_app = {
+ client_id = "44444444-4444-4444-4444-444444444444"
+ }
+}
+```
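+
+If you only need the client IDs, a quick (if crude) way to filter the stack output, assuming the shape above:
+
+```bash
+# Print only the client_id lines from the stack output
+terragrunt stack output | grep 'client_id'
+```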
+
+Use those values to set `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file.
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Pulling the resources into state</h3>
+
+Once you've provisioned the resources in your Azure subscription, you can pull them into remote state using the storage account you just created.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy
+```
+
+:::tip
+
+We're adding the `-force-copy` flag here to avoid OpenTofu waiting on an interactive prompt before copying local state into the new backend.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+{/* We're using an h3 tag here instead of a markdown heading to avoid adding content to the ToC that won't work when switching between tabs */}
+
+<h3>Bootstrapping additional subscriptions</h3>
+
+If you have multiple Azure subscriptions and want to bootstrap them as well, you can follow a similar, slightly condensed process.
+
+For each additional subscription you want to bootstrap, you'll use Boilerplate in the root of your `infrastructure-live` repository to scaffold out the necessary content for just that subscription.
+
+:::tip
+
+If you are going to bootstrap more Azure subscriptions, you'll probably want to commit your existing changes before proceeding.
+
+```bash
+git add .
+git commit -m "Bootstrap initial Azure subscription [skip ci]"
+```
+
+:::
+
+Just like before, run Boilerplate from the root of the repository, this time using the subscription-scoped template:
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder .
+```
+
+:::tip
+
+Again, you can reply `y` to all the prompts to include dependencies, and accept the defaults unless you want to customize something.
+
+:::
+
+Alternatively, you can run Boilerplate non-interactively by passing the `--non-interactive` flag. In that case, you'll need to supply the values for the required variables yourself. For example:
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var 'AccountName=prod' \
+ --var 'GitHubOrgName=acme' \
+ --var 'GitHubRepoName=infrastructure-live' \
+ --var 'SubscriptionName=prod' \
+ --var 'AzureTenantID=00000000-0000-0000-0000-000000000000' \
+ --var 'AzureSubscriptionID=99999999-9999-9999-9999-999999999999' \
+ --var 'AzureLocation=East US' \
+ --var 'StateResourceGroupName=pipelines-rg' \
+ --var 'StateStorageAccountName=myprodsa' \
+ --var 'StateStorageContainerName=tfstate' \
+ --non-interactive
+```
+
+If you prefer to store the values in a YAML file and pass it to Boilerplate using the `--var-file` flag, you can do so like this:
+
+```yaml title="vars.yml"
+AccountName: prod
+GitHubOrgName: acme
+GitHubRepoName: infrastructure-live
+SubscriptionName: prod
+AzureTenantID: 00000000-0000-0000-0000-000000000000
+AzureSubscriptionID: 99999999-9999-9999-9999-999999999999
+AzureLocation: East US
+StateResourceGroupName: pipelines-rg
+StateStorageAccountName: myprodsa
+StateStorageContainerName: tfstate
+```
+
+```bash
+boilerplate \
+ --template-url 'github.com/gruntwork-io/terragrunt-scale-catalog//templates/boilerplate/azure/github/subscription?ref=v1.0.0' \
+ --output-folder . \
+ --var-file vars.yml \
+ --non-interactive
+```
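+
+Before moving on, it can be helpful to review exactly what Boilerplate scaffolded for the new subscription:
+
+```bash
+# Show files added or modified by the scaffolding run
+git status --short
+```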
+
+:::note Progress Checklist
+
+
+
+:::
+
+To avoid issues with the remote state backend not existing yet, you'll want to comment out your remote state backend configurations in your `root.hcl` file before you start the bootstrap process for these new subscriptions.
+
+```hcl title="root.hcl"
+locals {
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl"))
+
+ state_resource_group_name = local.sub_hcl.locals.state_resource_group_name
+ state_storage_account_name = local.sub_hcl.locals.state_storage_account_name
+ state_storage_container_name = local.sub_hcl.locals.state_storage_container_name
+}
+
+# FIXME: Temporarily commented out again, pending successful bootstrap of the new subscription(s).
+#
+# remote_state {
+# backend = "azurerm"
+# generate = {
+# path = "backend.tf"
+# if_exists = "overwrite"
+# }
+# config = {
+# resource_group_name = local.state_resource_group_name
+# storage_account_name = local.state_storage_account_name
+# container_name = local.state_storage_container_name
+# key = "${path_relative_to_include()}/tofu.tfstate"
+# }
+# }
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+:::
+
+Just like before, you can use Terragrunt to provision the resources in each of these subscriptions.
+
+For each subscription you want to bootstrap, you'll need to run the following commands:
+
+```bash
+cd /_global/bootstrap
+terragrunt run --all --non-interactive --provider-cache plan
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate apply
+```
+
+:::tip
+
+We're adding the `--no-stack-generate` flag here because Terragrunt has already generated the requisite stack configurations, and we don't want to accidentally overwrite them while state is still stored locally, before it has been pulled into remote state.
+
+:::
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+Next, you can pull the resources into state using the storage account we just provisioned.
+
+First, edit the `root.hcl` file in the root of your `infrastructure-live` repository to uncomment the remote state backend configurations you commented out earlier.
+
+```hcl title="root.hcl"
+locals {
+ sub_hcl = read_terragrunt_config(find_in_parent_folders("sub.hcl"))
+
+ state_resource_group_name = local.sub_hcl.locals.state_resource_group_name
+ state_storage_account_name = local.sub_hcl.locals.state_storage_account_name
+ state_storage_container_name = local.sub_hcl.locals.state_storage_container_name
+}
+
+remote_state {
+ backend = "azurerm"
+ generate = {
+ path = "backend.tf"
+ if_exists = "overwrite"
+ }
+ config = {
+ resource_group_name = local.state_resource_group_name
+ storage_account_name = local.state_storage_account_name
+ container_name = local.state_storage_container_name
+ key = "${path_relative_to_include()}/tofu.tfstate"
+ }
+}
+
+generate "provider" {
+ path = "provider.tf"
+ if_exists = "overwrite_terragrunt"
+ contents = <
+
+:::
+
+With the backend configuration uncommented, pull the resources into remote state.
+
+```bash title="name-of-subscription"
+terragrunt run --all --non-interactive --provider-cache --no-stack-generate -- init -migrate-state -force-copy
+```
+
+:::tip
+
+We're adding the `-force-copy` flag here to avoid OpenTofu waiting on an interactive prompt before copying local state into the new backend.
+
+:::
+
+:::note Progress Checklist
+
+
+
+:::
+
+Finally, edit each of the `.gruntwork/environment-.hcl` files in the root of your `infrastructure-live` repository to reference the IDs of the applications you just provisioned.
+
+```hcl title=".gruntwork/environment-.hcl"
+environment "prod" {
+ filter {
+ paths = ["prod/*"]
+ }
+
+ authentication {
+ azure_oidc {
+ tenant_id = "00000000-0000-0000-0000-000000000000"
+ subscription_id = "99999999-9999-9999-9999-999999999999"
+
+ plan_client_id = "" # FIXME: Fill in the client ID for the plan application after bootstrapping
+ apply_client_id = "" # FIXME: Fill in the client ID for the apply application after bootstrapping
+ }
+ }
+}
+```
+
+You can find the values for `plan_client_id` and `apply_client_id` by running `terragrunt stack output` in the `name-of-subscription/bootstrap` directory.
+
+```bash
+terragrunt stack output
+```
+
+The relevant values to extract from the stack output are the following:
+
+```hcl
+bootstrap = {
+ apply_app = {
+ client_id = "55555555-5555-5555-5555-555555555555"
+ }
+ plan_app = {
+ client_id = "66666666-6666-6666-6666-666666666666"
+ }
+}
+```
+
+Use those values to set `plan_client_id` and `apply_client_id` in the `.gruntwork/environment-.hcl` file.
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+
+
+
+## Commit and push your changes
+
+Commit and push your changes to your repository.
+
+:::note
+
+You should include `[skip ci]` in your commit message here to prevent triggering the Pipelines workflow.
+
+:::
+
+```bash
+git add .
+git commit -m "Add Pipelines GitHub Actions workflow [skip ci]"
+git push
+```
+
+:::note Progress Checklist
+
+
+
+
+:::
+
+🚀 You've successfully added Gruntwork Pipelines to your new repository!
+
+## Next steps
+
+You have successfully completed the installation of Gruntwork Pipelines in a new repository. Proceed to [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) to begin deploying changes.
diff --git a/docs/2.0/docs/pipelines/installation/authoverview.md b/docs/2.0/docs/pipelines/installation/authoverview.md
index 6ed8a85d8d..b9023e00bc 100644
--- a/docs/2.0/docs/pipelines/installation/authoverview.md
+++ b/docs/2.0/docs/pipelines/installation/authoverview.md
@@ -1,25 +1,32 @@
-# Authenticating Gruntwork Pipelines
+# SCM Authentication Overview
-Gruntwork Pipelines requires authentication with GitHub/GitLab to perform various functions, including:
-* Downloading Gruntwork code, such as the Pipelines binary and Terraform modules, from the `gruntwork-io` GitHub organization.
-* Interacting with your repositories, such as:
- * Creating pull requests.
- * Commenting on pull requests.
- * Creating new repositories via Account Factory.
- * Updating repository settings, such as enforcing branch protection, via Account Factory.
+Gruntwork Pipelines requires authentication with Source Control Management (SCM) platforms (e.g. GitHub, GitLab) for various reasons, including:
-Gruntwork provides two authentication methods: a [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md) and CI Users ([Machine Users](/2.0/docs/pipelines/installation/viamachineusers.md)) with personal access tokens for Pipelines.
+- Downloading Gruntwork software, such as the Pipelines binary and OpenTofu modules, from the `gruntwork-io` GitHub organization.
+- Interacting with your repositories, such as:
+ - Creating pull requests.
+ - Commenting on pull requests.
+ - Creating new repositories via Account Factory.
+ - Updating repository settings, such as enforcing branch protection with Account Factory.
-Both approaches support the core functionality of Pipelines. However, the GitHub App provides additional features and benefits, making it the recommended method. While Gruntwork strives to ensure feature parity between the two authentication mechanisms, certain advanced features are exclusive to the GitHub App, and this list is expected to grow over time.
+Gruntwork provides two authentication methods:
+
+- [The Gruntwork.io GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md)
+- [CI Users (Machine Users)](/2.0/docs/pipelines/installation/viamachineusers)
+
+Both approaches support the core functionality of Pipelines. The GitHub App provides additional features and benefits, making it the recommended method for most customers that can use it. While Gruntwork strives to ensure feature parity between the two authentication mechanisms, certain advanced features are exclusive to the GitHub App, and this list is expected to grow over time.
## Summary of authentication mechanisms for GitHub
**Advantages of the GitHub App**:
+
- Simplified setup process.
- Access to enhanced features and functionality.
- Improved user experience during regular operations.
- Reduced maintenance, as there is no need to install, maintain, or rotate powerful tokens.
**Advantages of Machine Users**:
+
- Compatibility with on-premises GitHub Enterprise installations that cannot interact with third-party servers (e.g., Gruntwork's backend).
- Provides a fallback solution to ensure Pipelines continue functioning in the unlikely event of an outage affecting the Gruntwork-hosted backend that powers the GitHub App.
+- Allows GitLab customers to download the Pipelines binary from GitLab CI Pipelines.
diff --git a/docs/2.0/docs/pipelines/installation/branch-protection.md b/docs/2.0/docs/pipelines/installation/branch-protection.mdx
similarity index 58%
rename from docs/2.0/docs/pipelines/installation/branch-protection.md
rename to docs/2.0/docs/pipelines/installation/branch-protection.mdx
index c9dcda359e..399ca57d43 100644
--- a/docs/2.0/docs/pipelines/installation/branch-protection.md
+++ b/docs/2.0/docs/pipelines/installation/branch-protection.mdx
@@ -1,14 +1,8 @@
-# Branch Protection
+# Adding Branch Protection to a GitHub Repository
-Gruntwork Pipelines is designed to function within a PR-based workflow. Approving a pull request (PR) or merge request (MR) signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately.
+Gruntwork Pipelines is designed to function within a pull request (PR) based workflow. Approving a pull request signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately.
-## Recommended settings
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-
-
+## GitHub Recommended Settings
By default, Gruntwork Pipelines runs a `plan` on every push to a PR and an `apply` on every push to `main`. To ensure that infrastructure changes are reviewed and approved before deployment, branch protection should be enabled on `main` to prevent unauthorized changes.
@@ -40,32 +34,13 @@ Below is an example of the recommended branch protection settings:
GitHub Enterprise customers can also configure [push rulesets](https://docs.github.com/en/enterprise-cloud@latest/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets#push-rulesets). This feature allows restricting edits to `.github/workflows` files, ensuring infrastructure changes are properly reviewed and approved through Pipelines. Follow the documentation [here](https://docs.github.com/en/enterprise-cloud@latest/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/creating-rulesets-for-a-repository#creating-a-push-ruleset) to enable push rulesets if available.
:::
-
-
-
-## GitLab Recommended Settings
-
-For GitLab repositories, similar protection rules should be configured on the default branch (typically `main`). Navigate to `Settings > Repository > Protected branches` to configure the following settings:
-
-- Set the initial default branch to **Protected**.
-- Set **Allowed to merge** to "Developers" or a specific group to control who can merge changes.
-- Set **Allowed to push** to "No one" to prevent direct pushes to the protected branch.
-- (Optional) Enable **Require approval from code owners** to ensure designated reviewers approve changes to specific files.
-
-Below is an example of the recommended GitLab branch protection settings:
-
-
-
-
-
-
-## Merge Request/Pull Request Workflow
+## Pull Request Workflow
-1. Developers make infrastructure changes on a branch and create a PR (GitHub) or MR (GitLab) against the default branch.
-2. On merge request/pull request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment.
+1. Developers make infrastructure changes on a branch and create a pull request (PR) against the default branch.
+2. On pull request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment.
3. Gruntwork Pipelines re-runs `plan` on every push to the branch and updates the results in a comment.
4. Gather approvals. If Code Owners is enabled, all relevant code owners must approve the changes.
-5. Once approved, merge the merge request/pull request into the default branch.
-6. Gruntwork Pipelines runs `apply` for the changes from the merge request/pull request.
- - On success, the merge request/pull request is updated to indicate the successful `apply`.
- - On failure, the merge request/pull request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new merge request/pull request must be created to address the issues.
+5. Once approved, merge the pull request into the default branch.
+6. Gruntwork Pipelines runs `apply` for the changes from the pull request.
+ - On success, the pull request is updated to indicate the successful `apply`.
+ - On failure, the pull request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new pull request must be created to address the issues.
diff --git a/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md b/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md
new file mode 100644
index 0000000000..51d936034e
--- /dev/null
+++ b/docs/2.0/docs/pipelines/installation/gitlab-branch-protection.md
@@ -0,0 +1,27 @@
+# Adding Branch Protection to a GitLab Project
+
+Gruntwork Pipelines is designed to function within a merge request (MR) based workflow. Approving a merge request signals approval to deploy infrastructure, so it's important to configure repository settings and branch protection accurately.
+
+## GitLab Recommended Settings
+
+For GitLab repositories, similar protection rules should be configured on the default branch (typically `main`). Navigate to `Settings > Repository > Protected branches` to configure the following settings:
+
+- Set the initial default branch to **Protected**.
+- Set **Allowed to merge** to "Developers" or a specific group to control who can merge changes.
+- Set **Allowed to push** to "No one" to prevent direct pushes to the protected branch.
+- (Optional) Enable **Require approval from code owners** to ensure designated reviewers approve changes to specific files.
+
+Below is an example of the recommended GitLab branch protection settings:
+
+
+
+## Merge Request Workflow
+
+1. Developers make infrastructure changes on a branch and create a merge request (MR) against the default branch.
+2. On merge request creation, Gruntwork Pipelines runs `plan` for any changes and posts the results as a comment.
+3. Gruntwork Pipelines re-runs `plan` on every push to the branch and updates the results in a comment.
+4. Gather approvals. If Code Owners is enabled, all relevant code owners must approve the changes.
+5. Once approved, merge the merge request into the default branch.
+6. Gruntwork Pipelines runs `apply` for the changes from the merge request.
+ - On success, the merge request is updated to indicate the successful `apply`.
+ - On failure, the merge request is updated to indicate the failure of the `apply`. If the failure cannot be resolved by retrying, a new merge request must be created to address the issues.
diff --git a/docs/2.0/docs/pipelines/installation/overview.md b/docs/2.0/docs/pipelines/installation/overview.md
index 1a2320dc2b..aa37ab3978 100644
--- a/docs/2.0/docs/pipelines/installation/overview.md
+++ b/docs/2.0/docs/pipelines/installation/overview.md
@@ -2,9 +2,9 @@
Pipelines integrates multiple technologies to deliver a comprehensive CI/CD solution. This guide outlines the available installation methods and their respective use cases.
-## Installation as part of DevOps Foundations
+## Installation as part of Account Factory
-Customers using DevOps Foundations benefit from a guided setup process that includes the complete installation of Gruntwork Pipelines. This process is facilitated by a Gruntwork solutions engineer and includes the following steps:
+Customers using Account Factory benefit from a guided setup process that includes the complete installation of Gruntwork Pipelines. This process is facilitated by a Gruntwork solutions engineer and includes the following steps:
1. Creating a new `infrastructure-live-root` repository from the [`infrastructure-live-root-template`](https://github.com/gruntwork-io/infrastructure-live-root-template) template.
2. (On GitHub) Installing the [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) on the `infrastructure-live-root` repository or across the entire organization. For detailed instructions, refer to [this guide](/2.0/docs/pipelines/installation/viagithubapp).
@@ -12,11 +12,11 @@ Customers using DevOps Foundations benefit from a guided setup process that incl
Completing these steps results in a repository fully configured for automated infrastructure deployments using GitOps workflows.
-## Installation via manual setup
+## Standalone Installation
-For users not leveraging DevOps Foundations or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions or GitLab pipelines workflow.
+For users not leveraging Account Factory or needing Gruntwork Pipelines for a standalone repository with existing Terragrunt configurations, Gruntwork Pipelines can be installed as an independent GitHub Actions Workflow or GitLab CI Pipeline.
-To learn more about this process, consult the documentation for [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo).
+To learn more about this process, consult the documentation for [Adding Pipelines to a New Repository](/2.0/docs/pipelines/installation/addingnewrepo) or [Adding Pipelines to an Existing Repository](/2.0/docs/pipelines/installation/addingexistingrepo).
## Platform differences
@@ -29,15 +29,9 @@ For GitHub Actions, you have two authentication options:
1. [GitHub App Authentication](/2.0/docs/pipelines/installation/viagithubapp) (Recommended)
2. [Machine User Authentication](/2.0/docs/pipelines/installation/viamachineusers)
-### GitLab CI/CD (Beta)
+### GitLab CI/CD
For GitLab CI/CD:
1. [Machine User Authentication](/2.0/docs/pipelines/installation/viamachineusers) is the only supported method
-2. Contact Gruntwork support to authorize your GitLab groups
-
-:::note
-
- Account Factory features are not currently available on GitLab
-
- :::
\ No newline at end of file
+2. Contact [Gruntwork support](/support) to authorize your GitLab groups
diff --git a/docs/2.0/docs/pipelines/installation/scm-comparison.md b/docs/2.0/docs/pipelines/installation/scm-comparison.md
index c4f9f44d96..01de723c99 100644
--- a/docs/2.0/docs/pipelines/installation/scm-comparison.md
+++ b/docs/2.0/docs/pipelines/installation/scm-comparison.md
@@ -7,7 +7,8 @@ Gruntwork Pipelines supports both GitHub Actions and GitLab CI/CD as CI/CD platf
| Feature | GitHub | GitLab (Beta) |
| -------------------------------- | --------------------------- | ---------------------------- |
| Infrastructure as Code Pipelines | ✅ | ✅ |
-| Account Factory Integration | ✅ | ❌ |
+| Account Factory Integration | ✅ | ✅ |
+| Enterprise Account Factory | ✅ | ❌ |
| App-based Authentication | ✅ | ❌ |
| Machine User Authentication | ✅ | ✅ |
| Customizable Workflows | ✅ | ✅ |
diff --git a/docs/2.0/docs/pipelines/installation/viagithubapp.md b/docs/2.0/docs/pipelines/installation/viagithubapp.md
index b2dd70375c..501f83dfcc 100644
--- a/docs/2.0/docs/pipelines/installation/viagithubapp.md
+++ b/docs/2.0/docs/pipelines/installation/viagithubapp.md
@@ -4,7 +4,7 @@ toc_min_heading_level: 2
toc_max_heading_level: 3
---
-# Pipelines Install via GitHub App
+# Installing the Gruntwork.io GitHub App
The [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) is a [GitHub App](https://docs.github.com/en/apps/overview) introduced to help reduce the burden of integrating Gruntwork products to GitHub resources. The app is designed to be lightweight and flexible, providing a simple way to get started with Gruntwork products.
@@ -13,6 +13,7 @@ The [Gruntwork.io GitHub App](https://github.com/apps/gruntwork-io) is a [GitHub
At this time Gruntwork does not provide an app for GitLab, this page is only relevant for Gruntwork Pipelines users installing in GitHub.
:::
+
## Overview
There are three major components to keep in mind when working with the Gruntwork.io GitHub App:
@@ -28,6 +29,7 @@ The Gruntwork.io GitHub App is the principal that Gruntwork products will utiliz
#### Required Permissions
As of 2024/09/10, the Gruntwork.io GitHub App requests the following permissions:
+
- **Read access to Actions**: Allows the app to read GitHub Actions artifacts.
- **Write access to Administration**: Allows the app to create new repositories, and add teams as collaborators to repositories.
- **Write access to Contents**: Allows the app to read and write repository contents.
@@ -40,13 +42,15 @@ As of 2024/09/10, the Gruntwork.io GitHub App requests the following permissions
Gruntwork.io requests all of these permissions because it requires them for different operations. Unfortunately, the way GitHub apps work prevents us from requesting permissions on a more granular basis. Know that the GitHub App Service will scope down its permissions whenever possible to the minimum required for the operation at hand.
- The level of granularity available to customers when configuring the GitHub App installation is to either install the app on a per-repository basis or on an entire organization. Our recommendation is as follows:
+ The level of granularity available to customers when configuring the GitHub App installation is to either install the app on a per-repository basis or on an entire organization. Our recommendation is as follows for Account Factory customers:
+
+ - For non-enterprise customers, allow the app for `infrastructure-live-root` repository and (if in-use) `infrastructure-live-access-control` and `infrastructure-catalog`.
- * For non-enterprise customers, allow the app for `infrastructure-live-root` repository and (if in-use) `infrastructure-live-access-control` and `infrastructure-catalog`.
- * For enterprise customers, allow the app to have access to the entire organization.
+ - For enterprise customers, allow the app to have access to the entire organization.
-The reasoning for requiring entire-organization access for enterprise customers is that if you are using Account Factory to create delegated repositories then Account Factory will be creating, and then immediately modifying, new repositories in automated flows, which means it needs access to new repos as soon as they are created which is only possible with entire organization permission.
+ For non-Account Factory customers, we recommend installing the app on a per-repository basis.
+ The reasoning for requiring entire-organization access for enterprise customers is that, if you are using Account Factory to create delegated repositories, Account Factory will be creating, and then immediately modifying, new repositories in automated flows. That means it needs access to new repositories as soon as they are created, which is only possible with entire-organization permission.
If you are unsure how to proceed here, reach out to Gruntwork Support for guidance.
@@ -62,7 +66,7 @@ The reasoning for requiring entire-organization access for enterprise customers
These permissions are used during the initial bootstrapping process when customers opt-in to additional repositories being created outside of the main `infrastructure-live-root` repository.
- This is especially important for DevOps Foundations Enterprise customers, as those customers benefit from the ability to have `infrastructure-live-root` repositories create new repositories and add designated GitHub teams as collaborators via Infrastructure as Code (IaC). This is a critical feature for Enterprise customers who want to be able to scale their infrastructure management across multiple teams with delegated responsibility for segments of their IaC Estate.
+ This is especially important for Account Factory Enterprise customers, as those customers benefit from the ability to have `infrastructure-live-root` repositories create new repositories and add designated GitHub teams as collaborators via Infrastructure as Code (IaC). This is a critical feature for Enterprise customers who want to be able to scale their infrastructure management across multiple teams with delegated responsibility for segments of their IaC Estate.
Write access to Contents
@@ -108,7 +112,7 @@ The GitHub App Service is used by two major clients:
2. **Gruntwork Pipelines**
- The main client for the Gruntwork.io App, and where most of the value is derived. Pipelines uses the GitHub App Service to acquire the relevant access for interacting with GitHub resources on behalf of the user. Access control rules are enforced here to ensure that only the level of access required, and explicitly specified in the Gruntwork Developer Portal can be used by Pipelines to interact with GitHub resources on behalf of the user.
+ The main client for the Gruntwork.io App, and where most of the value is derived. Pipelines uses the GitHub App Service to acquire the relevant access for interacting with GitHub resources on behalf of the user. Access control rules are enforced here to ensure that only the level of access required (and explicitly specified in the Gruntwork Developer Portal) can be used by Pipelines to interact with GitHub resources on behalf of the user.
For example, while the Gruntwork.io GitHub App does have permissions to create new repositories, Pipelines will only do so if a workflow originating from a configured `infrastructure-live-root` repository requests it.
@@ -118,7 +122,7 @@ The availability of the Gruntwork.io GitHub App is something Gruntwork will ende
Any downtime of Gruntwork services will not impact the ability of your team to manage infrastructure using Gruntwork products.
-#### App Only Features
+### App Only Features
The following features of the Gruntwork.io GitHub App will be unavailable during downtime:
@@ -126,11 +130,11 @@ The following features of the Gruntwork.io GitHub App will be unavailable during
- **Gruntwork Pipelines Comments**: While Pipelines will allow for IaC updates in a degraded state without the availability of the GitHub App, comments are a feature that rely on the availability of the app for the best experience.
- **Gruntwork Pipelines Drift Detection**: Drift detection requires the availability of the GitHub App to function correctly.
-#### Fallback
+### Fallback
-In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, the legacy mechanism of authenticating with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers.md) is still supported.
+In order to ensure that the availability of the Gruntwork.io GitHub App is not something that can impair the ability of users to drive infrastructure updates, users can also authenticate with GitHub using [Machine users](/2.0/docs/pipelines/installation/viamachineusers).
-Configuring the `PIPELINES_READ_TOKEN`, `INFRA_ROOT_WRITE_TOKEN` and `ORG_REPO_ADMIN_TOKEN` where necessary (following the documentation linked above) will result in Pipelines using the legacy mechanism to authenticate with GitHub, rather than the Gruntwork.io GitHub App.
+Configuring the `PIPELINES_READ_TOKEN`, `INFRA_ROOT_WRITE_TOKEN` and `ORG_REPO_ADMIN_TOKEN` where necessary (following the documentation linked above) will result in Pipelines using the machine users mechanism to authenticate with GitHub, rather than the Gruntwork.io GitHub App.
Using these fallback tokens will ensure that Pipelines can continue to perform operations like:
@@ -160,9 +164,9 @@ To install the Gruntwork.io GitHub App in your organization follow these steps.
## Configuration
-
Infrastructure Live Root Repositories
+### Infrastructure Live Root Repositories
-DevOps Foundations treats certain repositories as especially privileged in order to perform critical operations like vending new AWS accounts and creating new repositories. These repositories are called "infrastructure live root repositories" and you can configure them in the [GitHub Account section](https://app.gruntwork.io/account?scroll_to=github-app) for your organization in the Gruntwork developer portal **if you are a designated administrator**.
+Account Factory treats certain repositories as especially privileged in order to perform critical operations like vending new AWS accounts and creating new repositories. These repositories are called "infrastructure live root repositories" and you can configure them in the [GitHub Account section](https://app.gruntwork.io/account?scroll_to=github-app) for your organization in the Gruntwork developer portal **if you are a designated administrator**.

@@ -174,7 +178,7 @@ For more information, see the [relevant architecture documentation](/2.0/docs/pi
## Frequently Asked Questions
-#### How do I find my Gruntwork.io GitHub App installation ID?
+### How do I find my Gruntwork.io GitHub App installation ID?
You can find the installation ID of the Gruntwork.io GitHub App in the URL of the installation page.
diff --git a/docs/2.0/docs/pipelines/installation/viamachineusers.md b/docs/2.0/docs/pipelines/installation/viamachineusers.mdx
similarity index 88%
rename from docs/2.0/docs/pipelines/installation/viamachineusers.md
rename to docs/2.0/docs/pipelines/installation/viamachineusers.mdx
index d708514c31..45d6ead0a9 100644
--- a/docs/2.0/docs/pipelines/installation/viamachineusers.md
+++ b/docs/2.0/docs/pipelines/installation/viamachineusers.mdx
@@ -3,12 +3,14 @@
toc_min_heading_level: 2
toc_max_heading_level: 4
---
-# Setting up Pipelines via GitHub Machine Users
+
+# Creating Machine Users
+
import PersistentCheckbox from '/src/components/PersistentCheckbox';
import Tabs from "@theme/Tabs"
import TabItem from "@theme/TabItem"
-For GitHub users, of the [two methods](/2.0/docs/pipelines/installation/authoverview.md) for installing Gruntwork Pipelines, we strongly recommend using the [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md). However, if the GitHub App cannot be used or if machine users are required as a [fallback](http://localhost:3000/2.0/docs/pipelines/installation/viagithubapp#fallback), this guide outlines how to set up authentication for Pipelines using access tokens and machine users.
+For GitHub users, of the [two methods](/2.0/docs/pipelines/installation/authoverview.md) for installing Gruntwork Pipelines, we strongly recommend using the [GitHub App](/2.0/docs/pipelines/installation/viagithubapp.md). However, if the GitHub App cannot be used or if machine users are required as a [fallback](/2.0/docs/pipelines/installation/viagithubapp#fallback), this guide outlines how to set up authentication for Pipelines using access tokens and machine users.
For GitHub or GitLab users, when using tokens, Gruntwork recommends setting up CI users specifically for Gruntwork Pipelines, separate from human users in your organization. This separation ensures workflows are not disrupted if an employee leaves the company and allows for more precise permission management. Additionally, using CI users allow you to apply granular permissions that may normally be too restrictive for a normal employee to do their daily work.
@@ -19,6 +21,7 @@ This guide will take approximately 30 minutes to complete.
:::
## Background
+
### Guidance on storing secrets
During this process, you will generate and securely store several access tokens. Use a temporary but secure location for these sensitive values between generating them and storing them in GitHub or GitLab. Follow your organization's security best practices and avoid insecure methods (e.g., Slack or sticky notes) during this exercise.
@@ -36,7 +39,7 @@ If screen sharing while generating tokens, **pause or hide your screen** before
:::
### Token types
-
+
GitHub supports two types of tokens:
@@ -77,7 +80,7 @@ More information is available [here](https://docs.github.com/en/organizations/ma

-
+
GitLab uses access tokens for authentication. There are several types of access tokens in GitLab:
@@ -87,16 +90,17 @@ GitLab uses access tokens for authentication. There are several types of access
For Pipelines, we recommend using Project or Group Access Tokens.
-Note that Project and Group access tokens are only available in certain GitLab licenses. Specifically:
+Note that Project and Group access tokens are only available in certain GitLab licenses. Specifically:
[Project Access Tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#token-availability)
-* On GitLab SaaS: If you have the Premium or Ultimate license tier, only one project access token is available with a [trial license](https://about.gitlab.com/free-trial/).
-* On GitLab Self-Managed instances: With any license tier. If you have the Free tier, consider [restricting the creation of project access tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#restrict-the-creation-of-project-access-tokens) to lower potential abuse.
+
+- On GitLab SaaS: If you have the Premium or Ultimate license tier, only one project access token is available with a [trial license](https://about.gitlab.com/free-trial/).
+- On GitLab Self-Managed instances: With any license tier. If you have the Free tier, consider [restricting the creation of project access tokens](https://docs.gitlab.com/user/project/settings/project_access_tokens/#restrict-the-creation-of-project-access-tokens) to lower potential abuse.
[Group Access Tokens](https://docs.gitlab.com/user/group/settings/group_access_tokens/)
-* On GitLab.com, you can use group access tokens if you have the Premium or Ultimate license tier. Group access tokens are not available with a [trial license](https://about.gitlab.com/free-trial/).
-* On GitLab Dedicated and self-managed instances, you can use group access tokens with any license tier.
+- On GitLab.com, you can use group access tokens if you have the Premium or Ultimate license tier. Group access tokens are not available with a [trial license](https://about.gitlab.com/free-trial/).
+- On GitLab Dedicated and self-managed instances, you can use group access tokens with any license tier.
@@ -107,8 +111,8 @@ When creating tokens, carefully consider the expiration date and scope of access
## Creating machine users
-
-
+
+
The recommended setup for Pipelines uses two machine users: one for opening pull requests and running workflows (`ci-user`) and another with read-only access to repositories (`ci-read-only-user`). Each user is assigned restrictive permissions based on their tasks. As a result, both users may need to participate at different stages to successfully run a pipeline job.
@@ -116,7 +120,7 @@ Both the `ci-user` and the `ci-read-only-user` must:
1. Be members of your GitHub Organization.
-2. Be added to your team in **Gruntwork**’s GitHub Organization (See [instructions on inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id)).
+2. Be added to your team in **Gruntwork**’s GitHub Organization (See [instructions on inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id)).
:::tip
We recommend creating two machine users for better access control, but you may adjust this setup to fit your organization’s needs. Ensure permissions are appropriate for their roles, and note that additional GitHub licenses may be required if at capacity.
@@ -141,6 +145,7 @@ Ensure the `ci-user` has write access to your:
- `infrastructure-live-access-control` repository
**Checklist:**
+
**Create access tokens for the `ci-user`**
@@ -148,13 +153,13 @@ Ensure the `ci-user` has write access to your:
Generate the required tokens for the ci-user in their GitHub account.
**Checklist:**
+
-
#### INFRA_ROOT_WRITE_TOKEN
-This [fine-grained](#fine-grained) Personal Access Token allows GitHub Actions to clone `infrastructure-live-root`, open pull requests, and update comments.
+This [fine-grained](#fine-grained-tokens) Personal Access Token allows GitHub Actions to clone `infrastructure-live-root`, open pull requests, and update comments.
This token must have the following permissions to the `INFRA_ROOT_WRITE_TOKEN` for the `infrastructure-live-root` repository:
@@ -175,18 +180,23 @@ Below is a detailed breakdown of the permissions needed for the `INFRA_ROOT_WRIT
If you are not an Enterprise customer or prefer Pipelines not to execute certain behaviors, you can opt not to grant the related permissions.
##### Content read & write access
+
Needed for cloning `infrastructure-live-root` and pushing automated changes. Without this permission, the pull request opened by the GitHub Actions workflow will not trigger automation during account vending.
##### Issues read & write access
+
Allows Pipelines to open issues that alert teams when manual action is required.
##### Metadata read access
+
Grants visibility into repository metadata.
##### Pull requests read & write access
+
Allows Pipelines to create pull requests to introduce infrastructure changes.
##### Workflows read & write access
+
Required to update workflows when provisioning new repositories.
@@ -215,42 +225,47 @@ The following is a breakdown of the permissions needed for the `ORG_REPO_ADMIN_T
If you are not an Enterprise customer or prefer Pipelines not to carry out certain actions, you can choose to withhold the related permissions.
##### Administration read & write access
+
Allows the creation of new repositories for delegated infrastructure management.
##### Content read & write access
+
Used for bootstrapping repositories and populating them with necessary content.
##### Metadata read access
+
Grants repository-level insights needed for automation.
##### Pull requests read & write access
- This is required to open pull requests. When vending delegated repositories for Enterprise customers, Pipelines will open pull requests to automate the process of introducing new Infrastructure as Code changes to drive infrastructure updates.
+
+This is required to open pull requests. When vending delegated repositories for Enterprise customers, Pipelines will open pull requests to automate the process of introducing new Infrastructure as Code changes to drive infrastructure updates.
##### Workflows read & write access
- This is required to update GitHub Action workflow files. When vending delegated repositories for Enterprise customers, Pipelines will create new repositories, including content in the `.github/workflows` directory. Without this permission, Pipelines would not be able to provision repositories with this content.
+
+This is required to update GitHub Action workflow files. When vending delegated repositories for Enterprise customers, Pipelines will create new repositories, including content in the `.github/workflows` directory. Without this permission, Pipelines would not be able to provision repositories with this content.
##### Members read & write access
- Required to update GitHub organization team members. When vending delegated repositories for Enterprise customers, Pipelines will add team members to a team that has access to a delegated repository. Without this permission, Pipelines would not be able to provision repositories that are accessible to the correct team members.
+Required to update GitHub organization team members. When vending delegated repositories for Enterprise customers, Pipelines will add team members to a team that has access to a delegated repository. Without this permission, Pipelines would not be able to provision repositories that are accessible to the correct team members.
-
:::tip
-If you are not an Enterprise customer, you should delete it after DevOps Foundations setup.
+If you are not an Enterprise customer, you should delete it after Account Factory onboarding.
:::
### ci-read-only-user
The `ci-read-only-user` is configured to download private software within GitHub Actions workflows. This user is responsible for accessing Gruntwork IaC Library modules, your infrastructure-modules repository, other private custom module repositories, and the Pipelines CLI.
-This user should use a single classic Personal Access Token (PAT)(https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#personal-access-tokens-classic) with read-only permissions. Since classic PATs offer coarse grained access controls, it’s recommended to assign this user to a GitHub team with READ access limited to the `infrastructure-live-root` repository and any relevant module repositories within your GitHub Organization. Adding this user to the Gruntwork Developer Portal will automatically grant access to the Gruntwork IaC Library.
+This user should use a single classic [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#personal-access-tokens-classic) with read-only permissions. Since classic PATs offer coarse grained access controls, it’s recommended to assign this user to a GitHub team with READ access limited to the `infrastructure-live-root` repository and any relevant module repositories within your GitHub Organization. Adding this user to the Gruntwork Developer Portal will automatically grant access to the Gruntwork IaC Library.
**Invite ci-read-only-user to your repository**
Invite `ci-user-read-only` to your `infrastructure-live-root` repository with read access.
**Checklist:**
+
**Create a token for ci-read-only-user**
@@ -260,8 +275,6 @@ Generate the following token for the `ci-read-only-user`:
**Checklist:**
-
-
#### PIPELINES_READ_TOKEN
This [Classic Personal Access Token](#classic-tokens) manages access to private software during GitHub Action runs.
@@ -275,6 +288,7 @@ This token must have `repo` scopes. Gruntwork recommends setting expiration to 9
Make sure both machine users are added to your team in Gruntwork’s GitHub Organization. Refer to the [instructions for inviting a user to your team](https://docs.gruntwork.io/developer-portal/invite-team#inviting-team-members) and [linking the user’s GitHub ID to Gruntwork](https://docs.gruntwork.io/developer-portal/link-github-id) for guidance.
**Checklist:**
+
## Configure secrets for GitHub Actions
@@ -287,11 +301,14 @@ Since this guide uses secrets scoped to specific repositories, the token permiss
+
**Checklist:**
+
+
1. Navigate to your top-level GitHub Organization and select the **Settings** tab.
2. From the navigation bar on the left side, choose **Secrets and variables**, then select **Actions**.
@@ -345,13 +362,16 @@ For more details on creating and using GitHub Actions Organization secrets, refe
+
**Checklist:**
+
+
Gruntwork Pipelines retrieves these secrets from GitHub Actions secrets configured in the repository. For instructions on creating repository Actions secrets, refer to [creating secrets for a repository](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository).
### `infrastructure-live-root`
@@ -378,8 +398,8 @@ If you are **not an Enterprise customer**, you should also do the following:
- Delete the `ORG_REPO_ADMIN_TOKEN` Personal Access Token from the `ci-user`’s GitHub account.
- Remove the `ORG_REPO_ADMIN_TOKEN` Repository secret from the `infrastructure-live-root` repository.
-:::
+:::
:::info
For more information on creating and using GitHub Actions Repository secrets, refer to the [GitHub Documentation](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository).
@@ -389,15 +409,14 @@ For more information on creating and using GitHub Actions Repository secrets, re
-
-
-
-For GitLab, Gruntwork Pipelines two CI variables. The first, the `PIPELINES_GITLAB_TOKEN` requires the `Developer`, `Maintainer` or `Owner` role and the scopes listed below. This token will be used to authenticate API calls and access repositories within your GitLab group. The second, the `PIPELINES_GITLAB_READ_TOKEN` will be used to access your own code within GitLab. If not set, Pipelines will default to the `CI_JOB_TOKEN` when accessing internal GitLab hosted code.
+
+For GitLab, Gruntwork Pipelines requires two CI variables. The first, `PIPELINES_GITLAB_TOKEN`, requires the `Developer`, `Maintainer`, or `Owner` role and the scopes listed below. This token will be used to authenticate API calls and access repositories within your GitLab group. The second, `PIPELINES_GITLAB_READ_TOKEN`, will be used to access your own code within GitLab. If not set, Pipelines will default to the `CI_JOB_TOKEN` when accessing internal GitLab-hosted code.
### Creating the Access Token
Gruntwork recommends [creating](https://docs.gitlab.com/user/project/settings/project_access_tokens/#create-a-project-access-token) two Project or Group Access Tokens as best practice:
+
| Token Name | Required Scopes | Required Role | Purpose |
| ------------------------------- | -------------------------------------------- | ------------------------------- | ---------------------------------------------------------------------------- |
| **PIPELINES_GITLAB_TOKEN** | `api` (and `ai_features` if using GitLab AI) | Developer, Maintainer, or Owner | Making API calls (e.g., creating comments on merge requests) |
@@ -417,6 +436,7 @@ Set an expiration date according to your organization's security policies. We re
:::
**Checklist:**
+
@@ -434,6 +454,7 @@ Add the `PIPELINES_GITLAB_TOKEN` and `PIPELINES_GITLAB_READ_TOKEN` as CI/CD vari
8. Set the value as the Personal Access Token generated in the [Creating the Access Token](#creating-the-access-token) section
**Checklist:**
+
diff --git a/docs/2.0/docs/pipelines/tutorials/deploying-to-aws-gov-cloud.md b/docs/2.0/docs/pipelines/tutorials/deploying-to-aws-gov-cloud.md
index 05a31d2340..40f916d472 100644
--- a/docs/2.0/docs/pipelines/tutorials/deploying-to-aws-gov-cloud.md
+++ b/docs/2.0/docs/pipelines/tutorials/deploying-to-aws-gov-cloud.md
@@ -47,18 +47,13 @@ Fundamentally, there are three places where the GovCloud partition must be set:
This section covers the Pipelines configuration required to deploy an AWS S3 bucket to AWS GovCloud.
-1. Add the account configuration to the bottom of your root-folder `accounts.yml` file.
+1. Create a `vars.yaml` file on your local machine with the following content:
- ```hcl title="accounts.yml"
- $$ACCOUNT_NAME$$:
- id: "$$ACCOUNT_ID$$"
- ```
-2. Create a `vars.yaml` file on your local machine with the following content:
-
-
+
```yaml title="vars.yaml"
AccountName: "$$ACCOUNT_NAME$$"
+ AccountId: "$$ACCOUNT_ID$$"
GitHubOrganization: "$$GITHUB_ORGANIZATION$$"
GitHubRepository: "$$GITHUB_REPOSITORY$$"
DeployBranchName: main # Change this to your default branch from which terragrunt apply should be run by pipelines
@@ -71,6 +66,7 @@ This section covers the Pipelines configuration required to deploy an AWS S3 buc
```yaml title="vars.yaml"
AccountName: "$$ACCOUNT_NAME$$"
+ AccountId: "$$ACCOUNT_ID$$"
GitLabGroup: "$$GITLAB_GROUP$$"
GitLabProject: "$$GITLAB_PROJECT$$"
DeployBranchName: main # Change this to your default branch from which terragrunt apply should be run by pipelines
@@ -84,15 +80,15 @@ This section covers the Pipelines configuration required to deploy an AWS S3 buc
3. We'll now use that `vars.yaml` file as input to [boilerplate](https://github.com/gruntwork-io/boilerplate) to generate the Terragrunt code for the OIDC Provider and IAM roles. From the root of your repository, run the following command:
-
+
```bash
-boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/github-actions-single-account-setup?ref=main" --output-folder . --var-file vars.yaml --non-interactive
+boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/github-actions-single-account-setup?ref=X.Y.Z" --output-folder . --var-file vars.yaml --non-interactive
```
```bash
-boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/gitlab-pipelines-single-account-setup?ref=main" --output-folder . --var-file vars.yaml --non-interactive
+boilerplate --template-url "git@github.com:gruntwork-io/terraform-aws-architecture-catalog.git//templates/gitlab-pipelines-single-account-setup?ref=X.Y.Z" --output-folder . --var-file vars.yaml --non-interactive
```
@@ -130,7 +126,7 @@ aws sts get-caller-identity
In the event you already have an OIDC provider for your SCM in the AWS account you can import the existing one:
-
+
```
cd _global/$$ACCOUNT_NAME$$/github-actions-openid-connect-provider/
diff --git a/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md b/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md
deleted file mode 100644
index 41899c58b2..0000000000
--- a/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# Deploying your first Infrastructure Change
-
-import CustomizableValue from '/src/components/CustomizableValue';
-import Tabs from "@theme/Tabs"
-import TabItem from "@theme/TabItem"
-
-In this tutorial, we will guide you through deploying an S3 bucket. This "hello world" example introduces Gruntwork Pipelines and lays the groundwork for using it in production environments.
-
-## What you'll get
-
-By the end of this tutorial, you will have:
-
-- An S3 bucket deployed automatically using Gruntwork Pipelines.
-
-## Prerequisites
-
-Before starting, ensure you have the following:
-
-- Pipelines installed in a GitHub or GitLab repository. Refer to [Setup & Installation](/2.0/docs/pipelines/installation/overview) for more details.
-- Access to a sandbox or development AWS account configured during the Pipelines installation process.
-- Permissions to create a pull request in the GitHub repository where Pipelines is installed.
-
-## Running Your first pipeline
-
-This section covers creating a resource in your AWS account using Pipelines and GitOps workflows. You will define a `terragrunt.hcl` file to create an AWS S3 bucket, push the changes, create a pull/merge request to trigger a `plan` action, and merge the request to run an `apply` action that creates the bucket.
-
-### Adding a new S3 bucket
-
-1. Create the folder structure for the new S3 bucket in your environment. Replace with the account name you are deploying to and with the AWS region where the S3 bucket will be deployed.
-
- ```bash
- mkdir -p $$ACCOUNT_NAME$$/$$REGION$$/$$ACCOUNT_NAME$$/data-storage/s3
- touch $$ACCOUNT_NAME$$/$$REGION$$/region.hcl
- touch $$ACCOUNT_NAME$$/$$REGION$$/$$ACCOUNT_NAME$$/data-storage/s3/terragrunt.hcl
- ```
-
-2. Add the following content to the `region.hcl` file created earlier.
-
- ```hcl title="$$ACCOUNT_NAME$$/$$REGION$$/region.hcl"
- locals {
- aws_region = "$$REGION$$"
- }
- ```
-
-2. Add the Terragrunt code below to the newly created `terragrunt.hcl` file to define the S3 bucket. Replace with your desired bucket name. Ensure the bucket name is unique.
-
- ```hcl title="$$ACCOUNT_NAME$$/$$REGION$$/$$ACCOUNT_NAME$$/data-storage/s3/terragrunt.hcl"
- # ------------------------------------------------------------------------------------------------------
- # DEPLOY GRUNTWORK's S3-BUCKET MODULE
- # ------------------------------------------------------------------------------------------------------
-
- terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.116.1"
- }
-
- include "root" {
- path = find_in_parent_folders()
- }
-
- inputs = {
- primary_bucket = "$$S3_BUCKET_NAME$$"
- }
- ```
-
-### Planning the changes
-
-
-
-
-1. Create a new branch for your changes.
-2. Commit the changes to your branch and push it.
-3. Create a pull request (PR) against `main` (the default branch in your repository). Refer to this [GitHub tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) for instructions on creating a PR.
-
-After creating the PR, GitHub Actions will automatically execute the workflow defined in `/.github/workflows/pipelines.yml` in your repository.
-
-Once the workflow completes, Pipelines will post a comment on the PR summarizing the `terragrunt plan` output along with a link to the GitHub Actions workflow logs.
-
-
-
-Click the *View full logs* link to see the complete output of the Gruntwork Pipelines run. Locate the *TerragruntExecute* step to review the full `terragrunt plan` generated by your changes.
-
-
-
-
-
-
-1. Create a new branch for your changes.
-2. Commit the changes to your branch and push it.
-3. Create a merge request (MR) against `main` (the default branch in your project). Refer to this [GitLab tutorial](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) for instructions on creating an MR.
-
-After creating the MR, GitLab CI/CD will automatically execute the pipeline defined in `.gitlab-ci.yml` in your project.
-
-Once the pipeline completes, Pipelines will post a comment on the MR summarizing the `terragrunt plan` output along with a link to the pipeline logs.
-Click the *View Pipeline Logs* link to see the complete output of the Gruntwork Pipelines run. Select the *plan* job to review the full `terragrunt plan` generated by your changes.
-
-
-
-
-### Applying the changes
-
-
-
-
-If you are satisfied with the `terragrunt plan` output, proceed to merge the PR to create the S3 bucket.
-
-Approve the PR and click the `Merge pull request` button to complete the merge. Upon merging, Pipelines will automatically execute an `apply` action to provision the S3 bucket.
-
-
-
-To monitor the workflow run associated with the merged PR:
-
-1. Navigate to the `main` branch of your repository.
-2. Click the Checks icon next to the latest commit at the top of the file explorer.
-3. Click `details` next to the Pipelines workflow to view the `dispatch` job logs.
-
-
-
-
-
-
-If you are satisfied with the `terragrunt plan` output, proceed to merge the MR to create the S3 bucket.
-
-Approve the MR and click the `Merge` button to complete the merge. Upon merging, Pipelines will automatically execute an `apply` action to provision the S3 bucket.
-
-To monitor the pipeline run associated with the merged MR:
-
-1. Navigate to the `main` branch of your project.
-2. Click CI/CD > Pipelines in the left sidebar.
-3. Click on the latest pipeline to view the `apply` job logs.
-
-
-
-
-Congratulations! You have successfully used Gruntwork Pipelines and a GitOps workflow to provision an S3 bucket in AWS. To verify the bucket creation, visit the AWS Management Console and check the S3 service for the bucket.
-
-To clean up the resources created during this tutorial, proceed to the next tutorial: [Destroying infrastructure with Pipelines](/2.0/docs/pipelines/tutorials/destroying-infrastructure#destroying-with-pipelines).
diff --git a/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.mdx b/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.mdx
new file mode 100644
index 0000000000..e3af1b4857
--- /dev/null
+++ b/docs/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change.mdx
@@ -0,0 +1,230 @@
+# Deploying your first Infrastructure Change
+
+import CustomizableValue from '/src/components/CustomizableValue';
+import Tabs from "@theme/Tabs"
+import TabItem from "@theme/TabItem"
+
+In this tutorial, we will guide you through deploying cloud storage. This "hello world" example introduces Gruntwork Pipelines and lays the groundwork for using it in production environments.
+
+## What you'll get
+
+By the end of this tutorial, you will have:
+
+- Cloud storage deployed automatically using Gruntwork Pipelines:
+ - **AWS**: An S3 bucket
+ - **Azure**: A Resource Group and Storage Account
+
+## Prerequisites
+
+Before starting, ensure you have the following:
+
+- Pipelines installed in a GitHub or GitLab repository. Refer to [Setup & Installation](/2.0/docs/pipelines/installation/overview) for more details.
+- Access to a sandbox or development cloud environment (AWS account or Azure subscription) configured during the Pipelines installation process.
+- Permissions to create a pull request in the GitHub/GitLab repository where Pipelines is installed.
+
+## Running Your first pipeline
+
+This section covers creating a cloud storage resource using Pipelines and GitOps workflows. You will define a `terragrunt.hcl` file to create storage, push the changes, create a pull/merge request to trigger a `plan` action, and merge the request to run an `apply` action that creates the resource.
+
+### Adding cloud storage
+
+
+
+
+1. Create the folder structure for the new S3 bucket in your environment. Replace `$$ACCOUNT_NAME$$` with the account name you are deploying to and `$$REGION$$` with the AWS region where the S3 bucket will be deployed.
+
+ ```bash
+ mkdir -p $$ACCOUNT_NAME$$/$$REGION$$/data-storage/s3
+ touch $$ACCOUNT_NAME$$/$$REGION$$/region.hcl
+ touch $$ACCOUNT_NAME$$/$$REGION$$/data-storage/s3/terragrunt.hcl
+ ```
+
+2. Add the following content to the `region.hcl` file created earlier.
+
+ ```hcl title="$$ACCOUNT_NAME$$/$$REGION$$/region.hcl"
+ locals {
+ aws_region = "$$REGION$$"
+ }
+ ```
+
+3. Add the Terragrunt code below to the newly created `terragrunt.hcl` file to define the S3 bucket. Replace `$$S3_BUCKET_NAME$$` with your desired bucket name. Ensure the bucket name is globally unique.
+
+ ```hcl title="$$ACCOUNT_NAME$$/$$REGION$$/data-storage/s3/terragrunt.hcl"
+ # ------------------------------------------------------------------------------------------------------
+ # DEPLOY GRUNTWORK's S3-BUCKET MODULE
+ # ------------------------------------------------------------------------------------------------------
+
+ terraform {
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.116.1"
+ }
+
+ include "root" {
+ path = find_in_parent_folders()
+ }
+
+ inputs = {
+ primary_bucket = "$$S3_BUCKET_NAME$$"
+ }
+ ```
+
+
+
+
+1. Create the folder structure for the new Resource Group and Storage Account in your environment. Replace `$$SUBSCRIPTION_NAME$$` with the subscription name you are deploying to, `$$LOCATION$$` with the Azure location where the resources will be deployed, and `$$RESOURCE_GROUP_NAME$$` with your desired resource group name.
+
+ ```bash
+ mkdir -p $$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/resource-group
+ mkdir -p $$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/data-storage/storage-account
+ touch $$SUBSCRIPTION_NAME$$/$$LOCATION$$/region.hcl
+ touch $$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/resource-group/terragrunt.hcl
+ touch $$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/data-storage/storage-account/terragrunt.hcl
+ ```
+
+2. Add the following content to the `region.hcl` file created earlier.
+
+ ```hcl title="$$SUBSCRIPTION_NAME$$/$$LOCATION$$/region.hcl"
+ locals {
+ azure_location = "$$LOCATION$$"
+ }
+ ```
+
+3. Add the Terragrunt code below to define the Resource Group.
+
+ ```hcl title="$$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/resource-group/terragrunt.hcl"
+ # ------------------------------------------------------------------------------------------------------
+ # DEPLOY GRUNTWORK's AZURE RESOURCE GROUP MODULE
+ # ------------------------------------------------------------------------------------------------------
+
+ include "root" {
+ path = find_in_parent_folders("root.hcl")
+ }
+
+ terraform {
+ source = "github.com/gruntwork-io/terragrunt-scale-catalog//modules/azure/resource-group?ref=v1.0.0"
+ }
+
+ inputs = {
+ name = "$$RESOURCE_GROUP_NAME$$"
+ location = "$$LOCATION$$"
+ }
+ ```
+
+4. Add the Terragrunt code below to define the Storage Account with a dependency on the Resource Group. Replace `$$STORAGE_ACCOUNT_NAME$$` with your desired storage account name. Ensure the name is globally unique and follows Azure naming conventions (lowercase letters and numbers only, 3-24 characters).
+
+ ```hcl title="$$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$/data-storage/storage-account/terragrunt.hcl"
+ # ------------------------------------------------------------------------------------------------------
+ # DEPLOY GRUNTWORK's AZURE STORAGE ACCOUNT MODULE
+ # ------------------------------------------------------------------------------------------------------
+
+ include "root" {
+ path = find_in_parent_folders("root.hcl")
+ }
+
+ terraform {
+ source = "github.com/gruntwork-io/terragrunt-scale-catalog//modules/azure/storage-account?ref=v1.0.0"
+ }
+
+ dependency "resource_group" {
+ config_path = "../../resource-group"
+
+ mock_outputs = {
+ name = "mock-name"
+ }
+ }
+
+ inputs = {
+ name = "$$STORAGE_ACCOUNT_NAME$$"
+ location = "$$LOCATION$$"
+
+ resource_group_name = dependency.resource_group.outputs.name
+ }
+ ```
+
+
+
+
+### Planning the changes
+
+
+
+
+1. Create a new branch for your changes.
+2. Commit the changes to your branch and push it.
+3. Create a pull request (PR) against `main` (the default branch in your repository). Refer to this [GitHub tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) for instructions on creating a PR.
+
+After creating the PR, GitHub Actions will automatically execute the workflow defined in `/.github/workflows/pipelines.yml` in your repository.
+
+Once the workflow completes, Pipelines will post a comment on the PR summarizing the `terragrunt plan` output along with a link to the GitHub Actions workflow logs.
+
+
+
+Click the *View full logs* link to see the complete output of the Gruntwork Pipelines run. Locate the *TerragruntExecute* step to review the full `terragrunt plan` generated by your changes.
+
+
+
+
+
+
+1. Create a new branch for your changes.
+2. Commit the changes to your branch and push it.
+3. Create a merge request (MR) against `main` (the default branch in your project). Refer to this [GitLab tutorial](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) for instructions on creating an MR.
+
+After creating the MR, GitLab CI/CD will automatically execute the pipeline defined in `.gitlab-ci.yml` in your project.
+
+Once the pipeline completes, Pipelines will post a comment on the MR summarizing the `terragrunt plan` output along with a link to the pipeline logs.
+Click the *View Pipeline Logs* link to see the complete output of the Gruntwork Pipelines run. Select the *plan* job to review the full `terragrunt plan` generated by your changes.
+
+
+
+
+### Applying the changes
+
+
+
+
+If you are satisfied with the `terragrunt plan` output, proceed to merge the PR to create the cloud storage resource.
+
+Approve the PR and click the `Merge pull request` button to complete the merge. Upon merging, Pipelines will automatically execute an `apply` action to provision the storage resource.
+
+
+
+To monitor the workflow run associated with the merged PR:
+
+1. Navigate to the `main` branch of your repository.
+2. Click the Checks icon next to the latest commit at the top of the file explorer.
+3. Click `details` next to the Pipelines workflow to view the `dispatch` job logs.
+
+
+
+
+
+
+If you are satisfied with the `terragrunt plan` output, proceed to merge the MR to create the cloud storage resource.
+
+Approve the MR and click the `Merge` button to complete the merge. Upon merging, Pipelines will automatically execute an `apply` action to provision the storage resource.
+
+To monitor the pipeline run associated with the merged MR:
+
+1. Navigate to the `main` branch of your project.
+2. Click CI/CD > Pipelines in the left sidebar.
+3. Click on the latest pipeline to view the `apply` job logs.
+
+
+
+
+Congratulations! You have successfully used Gruntwork Pipelines and a GitOps workflow to provision cloud storage.
+
+
+
+
+To verify the S3 bucket creation, visit the AWS Management Console and check the S3 service for the bucket.
+
+
+
+
+To verify the Resource Group and Storage Account creation, visit the Azure Portal and navigate to Resource Groups to confirm both resources were created.
+
+
+
+
+To clean up the resources created during this tutorial, proceed to the next tutorial: [Destroying infrastructure with Pipelines](/2.0/docs/pipelines/tutorials/destroying-infrastructure#destroying-with-pipelines).
diff --git a/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.md b/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.md
deleted file mode 100644
index 82910f8a9b..0000000000
--- a/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Destroying Infrastructure with Pipelines
-
-import CustomizableValue from '/src/components/CustomizableValue';
-import Tabs from "@theme/Tabs"
-import TabItem from "@theme/TabItem"
-
-This tutorial will help you learn how to destroy infrastructure using Gruntwork Pipelines and GitOps workflows.
-
-## What you'll get
-
-By the end, you'll have:
-
-- Destroyed AWS resources using Gruntwork Pipelines
-
-## Prerequisites
-
-Before starting, make sure you have:
-
-- Pipelines installed in a GitHub/GitLab repository. Refer to [Setup & Installation](/2.0/docs/pipelines/installation/overview) for details.
-- Access to a sandbox or development AWS account configured during the Pipelines installation process.
-- Permissions to create a pull/merge request in the GitHub/GitLab repository where Pipelines is installed.
-- An existing AWS resource created using Infrastructure as Code (IaC) and Pipelines that you want to destroy. If no resource exists, follow the steps in the [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) tutorial to create one, which will then be destroyed in this tutorial.
-
-## Destroying with pipelines
-
-This section explains how to destroy AWS resources using Pipelines and GitOps workflows. The example used is the S3 bucket created in the [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) tutorial, but the process applies to any resource you wish to destroy.
-
-### Delete the infrastructure code
-
-1. Remove the folder containing the infrastructure code for the resource you want to destroy. For the S3 bucket example, delete the folder containing the S3 bucket code. Replace and with the appropriate values.
-
- ```bash
- rm -rf $$ACCOUNT_NAME$$/$$REGION$$/$$ACCOUNT_NAME$$/data-storage/s3
- ```
-
-2. Create a new branch, commit the changes, and push the branch to your repository.
-
-### Planning the destruction
-
-
-
-
-Create a Pull Request (PR) for the branch you just pushed, targeting `main` (the default branch in your repository).
-
-
-
-Gruntwork Pipelines, via GitHub Actions, will detect the removal of the infrastructure unit's code and trigger a `plan` action in Pipelines. This action will display the destructive changes to be made to your AWS account.
-
-
-
-
-
-
-Create a Merge Request (MR) for the branch you just pushed, targeting `main` (the default branch in your project).
-
-Gruntwork Pipelines, via GitLab CI/CD, will detect the removal of the infrastructure unit's code and trigger a `plan` action in Pipelines. This action will display the destructive changes to be made to your AWS account.
-
-Click the *View Pipeline Logs* link to see the complete output of the destroy plan.
-
-
-
-
-### Applying the destruction
-
-
-If you are satisfied with the changes shown in the `plan` action, you can proceed to destroy the S3 bucket.
-
-Approve and merge the pull/merge request to trigger the apply action, permanently deleting the resource from your AWS account.
-
-Congratulations! You have successfully destroyed an AWS resource using Gruntwork Pipelines and GitOps workflows. To verify the resource has been destroyed, check your AWS management console.
diff --git a/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.mdx b/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.mdx
new file mode 100644
index 0000000000..dc7c861178
--- /dev/null
+++ b/docs/2.0/docs/pipelines/tutorials/destroying-infrastructure.mdx
@@ -0,0 +1,99 @@
+# Destroying Infrastructure with Pipelines
+
+import CustomizableValue from '/src/components/CustomizableValue';
+import Tabs from "@theme/Tabs"
+import TabItem from "@theme/TabItem"
+
+This tutorial will help you learn how to destroy infrastructure using Gruntwork Pipelines and GitOps workflows.
+
+## What you'll get
+
+By the end, you'll have:
+
+- Destroyed cloud resources (AWS or Azure) using Gruntwork Pipelines
+
+## Prerequisites
+
+Before starting, make sure you have:
+
+- Pipelines installed in a GitHub/GitLab repository. Refer to [Setup & Installation](/2.0/docs/pipelines/installation/overview) for details.
+- Access to a sandbox or development cloud environment (AWS account or Azure subscription) configured during the Pipelines installation process.
+- Permissions to create a pull/merge request in the GitHub/GitLab repository where Pipelines is installed.
+- An existing cloud resource created using Infrastructure as Code (IaC) and Pipelines that you want to destroy. If no resource exists, follow the steps in the [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) tutorial to create one, which will then be destroyed in this tutorial.
+
+## Destroying with pipelines
+
+This section explains how to destroy cloud resources using Pipelines and GitOps workflows. The example uses the resources created in the [Deploying your first infrastructure change](/2.0/docs/pipelines/tutorials/deploying-your-first-infrastructure-change) tutorial, but the process applies to any resource you wish to destroy.
+
+### Delete the infrastructure code
+
+
+
+
+1. Remove the folder containing the infrastructure code for the resource you want to destroy. For the S3 bucket example, delete the folder containing the S3 bucket code. Replace `$$ACCOUNT_NAME$$` and `$$REGION$$` with the appropriate values.
+
+ ```bash
+ rm -rf $$ACCOUNT_NAME$$/$$REGION$$/data-storage/s3
+ ```
+
+2. Create a new branch, commit the changes, and push the branch to your repository.
+
+
+
+
+1. Remove the folder containing the infrastructure code for the resources you want to destroy. For the Resource Group and Storage Account example, delete the folder containing all of the resource group code. Replace `$$SUBSCRIPTION_NAME$$`, `$$LOCATION$$`, and `$$RESOURCE_GROUP_NAME$$` with the appropriate values.
+
+ ```bash
+ rm -rf $$SUBSCRIPTION_NAME$$/$$LOCATION$$/resource-groups/$$RESOURCE_GROUP_NAME$$
+ ```
+
+2. Create a new branch, commit the changes, and push the branch to your repository.
+
+
+
+
+### Planning the destruction
+
+
+
+
+Create a Pull Request (PR) for the branch you just pushed, targeting `main` (the default branch in your repository).
+
+
+
+Gruntwork Pipelines, via GitHub Actions, will detect the removal of the infrastructure unit's code and trigger a `plan` action in Pipelines. This action will display the destructive changes to be made to your cloud environment.
+
+
+
+
+
+
+Create a Merge Request (MR) for the branch you just pushed, targeting `main` (the default branch in your project).
+
+Gruntwork Pipelines, via GitLab CI/CD, will detect the removal of the infrastructure unit's code and trigger a `plan` action in Pipelines. This action will display the destructive changes to be made to your cloud environment.
+
+Click the *View Pipeline Logs* link to see the complete output of the destroy plan.
+
+
+
+
+### Applying the destruction
+
+If you are satisfied with the changes shown in the `plan` action, you can proceed to destroy the resources.
+
+Approve and merge the pull/merge request to trigger the apply action, permanently deleting the resources from your cloud environment.
+
+Congratulations! You have successfully destroyed cloud resources using Gruntwork Pipelines and GitOps workflows.
+
+
+
+
+To verify the S3 bucket has been destroyed, check the AWS Management Console and confirm the bucket no longer exists in the S3 service.
+
+
+
+
+To verify the Resource Group and Storage Account have been destroyed, visit the Azure Portal and confirm the Resource Group no longer exists.
+
+
+
diff --git a/docs/2.0/reference/accountfactory/configurations-as-code.md b/docs/2.0/reference/accountfactory/configurations-as-code.md
new file mode 100644
index 0000000000..1818043ea8
--- /dev/null
+++ b/docs/2.0/reference/accountfactory/configurations-as-code.md
@@ -0,0 +1,419 @@
+# Pipelines Account Factory Configurations as Code
+
+import Tabs from "@theme/Tabs"
+import TabItem from "@theme/TabItem"
+import {
+ HclListItem,
+ HclListItemExample,
+ HclListItemDescription,
+ HclListItemTypeDetails,
+ HclListItemDefaultValue,
+ HclGeneralListItem,
+} from "/src/components/HclListItem.tsx"
+
+Pipelines Account Factory uses configurations written in [HashiCorp Configuration Language (HCL)](https://github.com/hashicorp/hcl) to enable dynamic behavior. These configurations determine how Account Factory will provision and configure new AWS accounts using Gruntwork Pipelines.
+
+To process configurations, Pipelines parses all `.hcl` files within a `.gruntwork` directory or a single file named `gruntwork.hcl`. Global configurations relevant to the entire repository are typically placed in a file named `account-factory.hcl` in the `.gruntwork` directory at the root of the repository.
+
+:::info
+
+We recommend reviewing our [concepts page](/2.0/docs/pipelines/concepts/hcl-config-language) on the HCL language to ensure familiarity with its features.
+:::
+
+## Basic configuration
+
+Below is an example of the minimal configuration required for Account Factory:
+
+```hcl
+# .gruntwork/account-factory.hcl
+account_factory {
+ control_tower_module_version = "va.b.c"
+ security_module_version = "va.b.c"
+ architecture_catalog_module_version = "va.b.c"
+ infrastructure_catalog_module_version = "va.b.c"
+ access_control_repository_name = "your-access-control-repository-name"
+ infrastructure_catalog_module_repository_name = "your-infrastructure-catalog-module-repository-name"
+}
+```
+
+## Block Reference
+
+For a more comprehensive walkthrough of how blocks work, please see the Pipelines Configurations as Code [concepts](/2.0/reference/pipelines/configurations-as-code).
+
+### `account_factory` block
+
+
+
+Account Factory blocks are used to define configurations that apply to provisioning and configuring new AWS accounts.
+
+See the detailed attribute configuration options [below](#account_factory-block-attributes).
+
+
+
+```hcl
+# .gruntwork/account-factory.hcl
+account_factory {
+ control_tower_module_version = "va.b.c"
+ security_module_version = "va.b.c"
+ architecture_catalog_module_version = "va.b.c"
+ infrastructure_catalog_module_version = "va.b.c"
+ access_control_repository_name = "your-access-control-repository-name"
+ infrastructure_catalog_module_repository_name = "your-infrastructure-catalog-module-repository-name"
+}
+```
+
+
+
+
+### `account_vending` block
+
+
+
+
+ Account Vending blocks are available for Gruntwork Enterprise customers, nested within [account_factory](#account_factory-block) blocks, to define how additional features such as multi-environment account provisioning and delegated repositories are enabled.
+ Each account-vending configuration block is a template for vending a particular set of accounts.
+
+ Labels such as "sdlc" or "sandbox" serve as the name of the account-vending configuration block and are the default Gruntwork-provided labels. Enterprise customers may define their own configuration blocks or modify the Gruntwork-provided blocks, but should contact [support@gruntwork.io](mailto:support@gruntwork.io) if they intend to [use the Gruntwork Developer Portal to generate new account requests](/2.0/docs/accountfactory/guides/vend-aws-account?account-creation-method=ui).
+
+
+ Account Vending blocks are used to define configurations that apply to a single account-vending template within a repository. See more [below](#account-vending-block-attributes).
+
+
+
+
+ ```hcl
+ # .gruntwork/account-factory.hcl
+ account_factory {
+   account_vending "sdlc" {
+     account_identifiers  = ["dev", "stage", "prod"]
+     catalog_repositories = ["path/to/catalog-repositories"]
+   }
+
+   account_vending "sandbox" {
+     account_identifiers  = ["sandbox"]
+     catalog_repositories = ["path/to/catalog-repositories"]
+   }
+ }
+ ```
+
+ In this example, when an account request of type `sdlc` is submitted, an account will be created for each of the identifiers `dev`, `stage`, and `prod`, with each identifier used as the suffix of the new account's name. Additionally, if the "Delegate Management of Account(s)?" option is chosen during the account request, the catalog repositories `path/to/catalog-repositories` will be added to the new accounts.
+
+ Similarly, when an account request of type `sandbox` is submitted, a single account will be created with the identifier `sandbox` as its suffix. Additionally, if the "Delegate Management of Account(s)?" option is chosen during the account request, the catalog repositories `path/to/catalog-repositories` will be added to the new account.
+
+
+
+
+### `ci_collaborator` block
+
+
+
+ A block, nested within an [account_vending](#account_vending-block) block, that adds a GitHub/GitLab team and its permissions to a dedicated infrastructure-live repository if the "Delegate Management of Account(s)?" option is chosen during the account request. See the detailed attribute configuration options [below](#ci-collaborator-block-attributes).
+
+
+
+
+ ```hcl
+ account_factory {
+   account_vending "sdlc" {
+     account_identifiers  = ["dev", "stage", "prod"]
+     catalog_repositories = ["path/to/catalog-repositories"]
+
+     ci_collaborator "a-team" {
+       team       = "apple-team"
+       permission = "maintainer"
+     }
+
+     ci_collaborator "b-team" {
+       team       = "banana-team"
+       permission = "read"
+     }
+   }
+ }
+ ```
+
+ In this example, the `a-team` will be added as a maintainer and the `b-team` will be added as a read-only collaborator to a dedicated infrastructure-live repository if the "Delegate Management of Account(s)?" option is chosen during an account request of type `sdlc`.
+
+
+
+## Account Factory Block Attributes
+
+### access_control_template_path
+
+
+
+
+ Path to the access-control-accounts template, in the architecture-catalog repository, to use when provisioning new accounts.
+
+
+
+
+### access_control_repository_name
+
+
+
+
+ The name of your infrastructure-live-access-control repository.
+
+
+
+
+### architecture_catalog_module_version
+
+
+
+
+ The version of the architecture catalog module to use when provisioning new accounts.
+
+
+
+
+### architecture_catalog_repo_url
+
+
+
+
+ The URL of the architecture catalog repository to use when provisioning new accounts.
+
+
+
+
+### aws_security_repo_url
+
+
+
+
+ The URL of the aws-security module repository to use when provisioning new accounts.
+
+
+
+
+### aws_utilities_repo_url
+
+
+
+
+ The URL of the aws-utilities module repository to use when provisioning new accounts.
+
+
+
+
+### catalog_tags_location
+
+
+
+
+ The path to the catalog tags file to use when provisioning new accounts.
+
+
+
+
+### cis_service_catalog_repo_url
+
+
+
+
+ The URL of the cis-service-catalog module repository to use when provisioning new accounts.
+
+
+
+
+### control_tower_module_version
+
+
+
+
+ The version of the aws-control-tower module to use when provisioning new accounts.
+
+
+
+
+### control_tower_repo_url
+
+
+
+
+ The URL of the aws-control-tower repository to use when provisioning new accounts.
+
+
+
+
+### delegated_repository_template_path
+
+
+
+
+ The path to the devops-foundations-infrastructure-live-delegated template, in the architecture-catalog repository, to use when provisioning new accounts.
+
+
+
+
+### disable_vpc_inputs
+
+
+
+
+ If set to true, the `terragrunt.hcl` generated for the VPC in new delegated accounts will not pass any inputs to the VPC module. This is useful for customers with custom VPC configurations (e.g., IPAM, transit subnets, or private NAT). All of this custom configuration can go into `vpc-app.hcl` in `_envcommon` directly in the customer's infrastructure-live repository.
+
+
+
+
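+
+ For instance, a minimal sketch of setting this attribute in your `.gruntwork/account-factory.hcl` (other attributes omitted for brevity):
+
+ ```hcl
+ account_factory {
+   # Do not pass inputs to the VPC module for new delegated accounts;
+   # custom VPC configuration lives in _envcommon/vpc-app.hcl instead.
+   disable_vpc_inputs = true
+ }
+ ```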
+
+### infrastructure_catalog_module_repository_name
+
+
+
+
+ The name of your infrastructure-catalog module repository.
+
+
+
+
+### infrastructure_catalog_module_version
+
+
+
+
+ The version of your infrastructure-catalog module repository.
+
+
+
+
+### logs_account_name
+
+
+
+
+ The name of your logs account if different from the default of `logs`.
+
+
+
+
+### management_account_name
+
+
+
+
+ The name of your management account if different from the default of `management`.
+
+
+
+
+### pipelines_read_token_name
+
+
+
+
+ (GitHub only) The name of your pipelines read token if different from the default of `PIPELINES_READ_TOKEN`.
+
+
+
+
+### pipelines_workflow_location
+
+
+
+
+ (GitHub only) The location of your pipelines workflow if different from the default of `gruntwork-io/pipelines-workflows/.github/workflows/pipelines.yml@X`.
+
+
+
+
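+
+ A minimal sketch combining the two GitHub-only overrides described above; the secret name, organization, and version tag shown here are hypothetical:
+
+ ```hcl
+ account_factory {
+   # Hypothetical values: replace with your actual token secret name and workflow fork/ref
+   pipelines_read_token_name   = "MY_PIPELINES_READ_TOKEN"
+   pipelines_workflow_location = "my-org/pipelines-workflows/.github/workflows/pipelines.yml@v3"
+ }
+ ```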
+### security_account_name
+
+
+
+
+ The name of your security account if different from the default of `security`.
+
+
+
+
+### security_module_version
+
+
+
+
+ The version of the aws-security module repository to use when provisioning new accounts.
+
+
+
+
+### shared_account_name
+
+
+
+
+ The name of your shared account if different from the default of `shared`.
+
+
+
+
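+
+ A minimal sketch overriding the default foundational account names described above; the replacement names are hypothetical:
+
+ ```hcl
+ account_factory {
+   # Hypothetical names: defaults are "logs", "management", "security", and "shared"
+   logs_account_name       = "log-archive"
+   management_account_name = "mgmt"
+   security_account_name   = "security-tooling"
+   shared_account_name     = "shared-services"
+ }
+ ```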
+### single_account_baseline_template_path
+
+
+
+
+ The path to the single-account-baseline template, in the architecture-catalog repository, to use when provisioning new accounts.
+
+
+
+
+### vpc_module_url
+
+
+
+
+ The URL of the VPC module to use when provisioning new accounts.
+
+
+
+
+### vpc_module_version
+
+
+
+
+ The version of the VPC module to use when provisioning new accounts.
+
+
+
+
+
+## Account Vending Block Attributes
+
+### account_identifiers
+
+
+
+
+A list of account identifiers. When vending accounts with this Account Vending configuration, a new account will be created for each identifier.
+
+
+
+
+### catalog_repositories
+
+
+
+ A list of repositories containing infrastructure modules that delegated repositories vended during account provisioning can leverage as a catalog.
+
+
+
+
+## CI Collaborator Block Attributes
+
+### team
+
+
+
+ The name of the GitHub team or GitLab group to add to a delegated infrastructure-live repository.
+
+
+
+### permission
+
+
+
+ The permission to grant the GitHub team or GitLab group. See the respective documentation for [GitHub](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/managing-repository-roles/repository-roles-for-an-organization#permissions-for-each-role) and [GitLab](https://docs.gitlab.com/user/permissions/).
+
+
diff --git a/docs/2.0/reference/accountfactory/index.md b/docs/2.0/reference/accountfactory/index.md
new file mode 100644
index 0000000000..e6a8b7c202
--- /dev/null
+++ b/docs/2.0/reference/accountfactory/index.md
@@ -0,0 +1,10 @@
+# Overview
+
+import PipelinesLanguageTransitionPartial from '../pipelines/language_transition_partial.mdx'
+
+<PipelinesLanguageTransitionPartial />
+
+## Next Steps
+
+- Explore the [YAML Configurations](/2.0/reference/accountfactory/configurations) reference for detailed guidance.
+- Learn more about the (Beta) [Configurations as Code](/2.0/reference/accountfactory/configurations-as-code) reference.
diff --git a/docs/2.0/reference/pipelines/configurations-as-code/api.md b/docs/2.0/reference/pipelines/configurations-as-code/api.mdx
similarity index 86%
rename from docs/2.0/reference/pipelines/configurations-as-code/api.md
rename to docs/2.0/reference/pipelines/configurations-as-code/api.mdx
index f57baf0886..319964ff41 100644
--- a/docs/2.0/reference/pipelines/configurations-as-code/api.md
+++ b/docs/2.0/reference/pipelines/configurations-as-code/api.mdx
@@ -9,6 +9,7 @@ For a more comprehensive walkthrough of how blocks work please see the Pipelines
### `environment` block
+
Environment blocks are used to define configurations that are applicable to a specific environment within a repository.
@@ -182,6 +183,24 @@ The label applied to an accounts block is the name of the Accounts block. This i
An AWS OIDC authentication block that determines how Pipelines will authenticate with AWS using OIDC. See more [below](#aws_oidc-block-attributes).
+### `azure_oidc` block
+
+
+
+
+An Azure OIDC authentication block that determines how Pipelines will authenticate with Azure using OIDC. See more [below](#azure_oidc-block-attributes).
+
+
+
+
+
+
+### `custom` block
+
+
+
+
+A custom authentication block that determines how Pipelines will authenticate using custom authentication logic when running Terragrunt commands. See more [below](#custom-block-attributes).
@@ -280,7 +299,7 @@ Whether or not Pipelines will consolidate deleted resources when running Terragr
The Infrastructure as Code(Iac) binary that Pipelines will instruct Terragrunt to use. Valid values are:
- `opentofu` (default): Use OpenTofu for managing infrastructure. Gruntwork recommends customers use OpenTofu.
-- `terraform`: Use Terraform for managing infrastructure.
+- `terraform`: Use Terraform for managing infrastructure.
:::note
@@ -344,6 +363,7 @@ This is to make it convenient to tuck the `accounts.yml` file away somewhere in
The AWS account ID that Pipelines will authenticate with.
+
@@ -351,6 +371,7 @@ The AWS account ID that Pipelines will authenticate with.
The IAM role ARN that Pipelines will assume when running Terragrunt plan commands.
+
@@ -358,6 +379,7 @@ The IAM role ARN that Pipelines will assume when running Terragrunt plan command
The IAM role ARN that Pipelines will assume when running Terragrunt apply commands.
+
@@ -365,6 +387,7 @@ The IAM role ARN that Pipelines will assume when running Terragrunt apply comman
The AWS region that Pipelines will use when calling the AWS Security Token Service (STS) to generate authentication tokens.
+
@@ -377,3 +400,53 @@ The duration in seconds that the AWS session will be valid for.
+
+### `azure_oidc` block attributes
+
+
+
+
+The Azure tenant ID that Pipelines will authenticate with.
+
+
+
+
+
+
+
+The Azure subscription ID that Pipelines will authenticate with.
+
+
+
+
+
+
+
+The Azure client ID that Pipelines will authenticate with when running Terragrunt plan commands.
+
+
+
+
+
+
+
+The Azure client ID that Pipelines will authenticate with when running Terragrunt apply commands.
+
+
+
+
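+
+A minimal sketch of an `azure_oidc` block nested in a unit's `authentication` block. The attribute names below are assumptions inferred from the descriptions above, and the IDs are placeholders; consult the authoritative schema before relying on them:
+
+```hcl
+# gruntwork.hcl
+unit {
+  authentication {
+    azure_oidc {
+      # Assumed attribute names for the tenant, subscription, and per-command client IDs
+      tenant_id       = "00000000-0000-0000-0000-000000000000"
+      subscription_id = "11111111-1111-1111-1111-111111111111"
+      plan_client_id  = "22222222-2222-2222-2222-222222222222"
+      apply_client_id = "33333333-3333-3333-3333-333333333333"
+    }
+  }
+}
+```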
+### `custom` block attributes
+
+
+
+
+The command that Pipelines will execute to perform custom authentication.
+
+:::tip
+
+You can learn more about how custom authentication works in the [Custom Authentication](/2.0/docs/pipelines/concepts/cloud-auth/custom) documentation.
+
+:::
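+
+A minimal sketch of a `custom` block nested in a unit's `authentication` block. The `command` attribute name and the script path are assumptions for illustration:
+
+```hcl
+# gruntwork.hcl
+unit {
+  authentication {
+    custom {
+      # Assumed attribute name; the referenced script is hypothetical
+      command = "./scripts/authenticate.sh"
+    }
+  }
+}
+```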
+
+
+
diff --git a/docs/2.0/reference/pipelines/configurations-as-code/index.md b/docs/2.0/reference/pipelines/configurations-as-code/index.md
index 991d6bdd20..d454dcf852 100644
--- a/docs/2.0/reference/pipelines/configurations-as-code/index.md
+++ b/docs/2.0/reference/pipelines/configurations-as-code/index.md
@@ -9,10 +9,9 @@ To process configurations, Pipelines parses all `.hcl` files within a `.gruntwor
We recommend reviewing our [concepts page](/2.0/docs/pipelines/concepts/hcl-config-language) on the HCL language to ensure familiarity with its features before configuring Pipelines.
:::
-
## Basic configuration
-The minimum configuration required for Pipelines to function depends on the specific context. In most scenarios, Pipelines must determine how to authenticate with a cloud provider to execute Terragrunt commands. If authentication is not configured where required, Pipelines will generate an error.
+The minimum configuration required for Pipelines to function depends on the specific context. In most scenarios, Pipelines must determine how to authenticate with a cloud provider to execute Terragrunt commands. Unless you are running Pipelines on a host machine that is already authenticated with a cloud provider (e.g., a self-hosted runner), you generally must configure some form of authentication within the `authentication` block.
Below is an example of a minimal configuration for a single Terragrunt unit, demonstrating how to enable Pipelines to authenticate with AWS using OIDC:
@@ -31,6 +30,17 @@ unit {
Placing this configuration in a `gruntwork.hcl` file within the same directory as a `terragrunt.hcl` file directs Pipelines to assume the `role-to-assume-for-plans` role in the AWS account with the ID `an-aws-account-id` when executing Terragrunt plan commands. The authentication process leverages [OIDC](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services) to securely connect to AWS and assume the specified role.
+:::tip
+
+If you are running Pipelines on a host machine that is already authenticated with a cloud provider, you can explicitly leave the `authentication` block empty to signal that Pipelines should not attempt to perform any authentication itself.
+
+```hcl
+unit {
+ authentication {}
+}
+```
+
+:::
It is common for multiple Terragrunt units within a repository to assume the same AWS role. For instance, all units within a specific directory may provision resources for the same AWS account. Configuring AWS authentication at the environment level is more efficient in these cases. You can do this by defining an `environment` block within one of the `.hcl` files in the `.gruntwork` directory at the repository root and specifying the AWS authentication configuration.
@@ -63,7 +73,6 @@ Details regarding the functionality of each configuration type are outlined belo
Pipelines configurations are structured into a hierarchy to manage specificity. Configurations that are more specific to an individual unit of IaC will take precedence over more general configurations in cases of conflict.
-
The configuration hierarchy is as follows:
### Repository configurations
@@ -221,6 +230,7 @@ Job consolidation is a process by which Pipelines merges multiple related jobs (
This optimization significantly reduces CI/CD costs by consolidating Terragrunt execution into fewer jobs, spreading the operational expenses more efficiently. Additionally, it enhances accuracy by allowing Terragrunt to leverage a Directed Acyclic Graph (DAG) for proper sequencing of updates.
For example:
+
- Instead of running the following independent jobs:
A. `ModuleAdded`
B. `ModuleChanged` (which depends on `ModuleAdded`)
@@ -243,6 +253,7 @@ In rare cases, you might disable job consolidation to allocate maximum resources
Configurations must be specified within a single file named gruntwork.hcl, located in the same directory as the terragrunt.hcl file. These configurations are referred to as local configurations and are generally used to define settings specific to a single unit of Infrastructure as Code (IaC) within a repository. .
These configurations can serve two purposes:
+
1. Define all the settings necessary for Pipelines to operate within the scope of a single unit.
2. Override global configurations defined in the `.gruntwork` directory, tailoring them to the unit's specific needs.
@@ -288,7 +299,6 @@ filter {
All configuration blocks containing a `filter` block will apply only to units that match the specified filter.
-
### Authentication blocks
[Full Reference for Authentication Blocks](/2.0/reference/pipelines/configurations-as-code/api#authentication-block)
@@ -296,9 +306,9 @@ All configuration blocks containing a `filter` block will apply only to units th
Authentication blocks are configuration components used by [environment](#environment-blocks) and [unit](#unit-blocks) blocks to specify how Pipelines will authenticate with cloud platforms when executing Terragrunt commands.
:::note
-Authentication blocks encapsulate more specific authentication configurations tailored to individual cloud platforms. When Pipelines processes an `authentication` block, it attempts to authenticate with all cloud platforms defined within it.
-Currently, the only supported block that can be nested within an `authentication` block is `aws_oidc`.
+Authentication blocks encapsulate more specific authentication configurations tailored to individual cloud platforms. When Pipelines processes an `authentication` block, it attempts to authenticate with the relevant cloud platform defined within it.
+
:::
:::tip
diff --git a/docs/2.0/reference/pipelines/index.md b/docs/2.0/reference/pipelines/index.md
index 3da2dc0212..30a59206fe 100644
--- a/docs/2.0/reference/pipelines/index.md
+++ b/docs/2.0/reference/pipelines/index.md
@@ -1,8 +1,10 @@
# Overview
-import PipelinesConfig from './language_auth_partial.mdx'
+import PipelinesLanguageTransitionPartial from './language_transition_partial.mdx'
+import PipelinesAuthPartial from './language_auth_partial.mdx'
-<PipelinesConfig />
+<PipelinesLanguageTransitionPartial />
+<PipelinesAuthPartial />
## Additional configuration
@@ -10,5 +12,5 @@ In addition to authentication, the primary configuration options involve customi
## Next Steps
-- Explore the [YAML Configurations](/2.0/reference/pipelines/configurations.md) reference for detailed guidance.
-- Learn more about the (Beta) [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/api.md) reference.
+- Explore the [YAML Configurations](/2.0/reference/pipelines/configurations) reference for detailed guidance.
+- Learn more about the (Beta) [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/api) reference.
diff --git a/docs/2.0/reference/pipelines/language_auth_partial.mdx b/docs/2.0/reference/pipelines/language_auth_partial.mdx
index 77a19e56e4..e1e1e316de 100644
--- a/docs/2.0/reference/pipelines/language_auth_partial.mdx
+++ b/docs/2.0/reference/pipelines/language_auth_partial.mdx
@@ -1,13 +1,3 @@
-## Configuration Language Transition
-
-Pipelines configurations are currently undergoing a transition from YAML configurations to new HCL [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/index.md). These new configurations will offer a richer configuration experience, but are not yet required for use. [YAML configurations](/2.0/reference/pipelines/configurations.md) will continue to work as expected for the time being.
-
-YAML configurations are read by Pipelines when HCL configurations are not present, and the Pipelines binary falls back to interpreting YAML configurations as if they were defined in the HCL configuration system in this scenario.
-
-This means that if you have a `.gruntwork/config.yml` file in your repository, you can continue to use it as you have been, and Pipelines will continue to work as expected.
-
-If you do introduce any HCL configurations into your `.gruntwork` directory or introduce a `gruntwork.hcl` file into a directory, Pipelines will begin to use the HCL configuration system instead of the YAML configuration system.
-
## Authentication
Core pipelines functionally generally requires only a small amount of configuration. The most critical configuration for the CI/CD pipeline is how to authenticate with AWS, and that is covered in one of two ways:
diff --git a/docs/2.0/reference/pipelines/language_transition_partial.mdx b/docs/2.0/reference/pipelines/language_transition_partial.mdx
new file mode 100644
index 0000000000..4637e0c9c7
--- /dev/null
+++ b/docs/2.0/reference/pipelines/language_transition_partial.mdx
@@ -0,0 +1,9 @@
+## Configuration Language Transition
+
+Pipelines configurations are currently undergoing a transition from YAML configurations to new HCL [Configurations as Code](/2.0/reference/pipelines/configurations-as-code/index.md). These new configurations will offer a richer configuration experience, but are not yet required for use. [YAML configurations](/2.0/reference/pipelines/configurations.md) will continue to work as expected for the time being.
+
+YAML configurations are read by Pipelines when HCL configurations are not present, and the Pipelines binary falls back to interpreting YAML configurations as if they were defined in the HCL configuration system in this scenario.
+
+This means that if you have a `.gruntwork/config.yml` file in your repository, you can continue to use it as you have been, and Pipelines will continue to work as expected.
+
+If you do introduce any HCL configurations into your `.gruntwork` directory or introduce a `gruntwork.hcl` file into a directory, Pipelines will begin to use the HCL configuration system instead of the YAML configuration system.
diff --git a/sidebars/docs.js b/sidebars/docs.js
index e837285036..3fbe36c9c8 100644
--- a/sidebars/docs.js
+++ b/sidebars/docs.js
@@ -126,9 +126,31 @@ const sidebar = [
},
items: [
{
- label: "Authenticating with Cloud Providers",
- type: "doc",
- id: "2.0/docs/pipelines/concepts/cloud-auth",
+ label: "Authenticating to the Cloud",
+ type: "category",
+ collapsed: false,
+ items: [
+ {
+ label: "Overview",
+ type: "doc",
+ id: "2.0/docs/pipelines/concepts/cloud-auth/index",
+ },
+ {
+ label: "AWS",
+ type: "doc",
+ id: "2.0/docs/pipelines/concepts/cloud-auth/aws",
+ },
+ {
+ label: "Azure",
+ type: "doc",
+ id: "2.0/docs/pipelines/concepts/cloud-auth/azure",
+ },
+ {
+ label: "Custom",
+ type: "doc",
+ id: "2.0/docs/pipelines/concepts/cloud-auth/custom",
+ }
+ ],
},
{
label: "Drift Detection",
@@ -152,9 +174,9 @@ const sidebar = [
},
items: [
{
- label: "Components",
+ label: "Execution flow",
type: "doc",
- id: "2.0/docs/pipelines/architecture/components",
+ id: "2.0/docs/pipelines/architecture/execution-flow",
},
{
label: "Actions",
@@ -203,34 +225,22 @@ const sidebar = [
id: "2.0/docs/pipelines/installation/scm-comparison",
},
{
- label: "Prerequisites",
type: "category",
+ label: "Set up SCM Authentication",
collapsed: false,
items: [
{
- label: "AWS Landing Zone",
- type: "doc",
- id: "2.0/docs/pipelines/installation/prerequisites/awslandingzone",
- },
- ],
- },
- {
- type: "category",
- label: "Enable Auth for Pipelines",
- collapsed: false,
- items: [
- {
- label: "Auth Overview",
+ label: "Overview",
type: "doc",
id: "2.0/docs/pipelines/installation/authoverview",
},
{
- label: "Auth via GitHub App",
+ label: "GitHub App",
type: "doc",
id: "2.0/docs/pipelines/installation/viagithubapp",
},
{
- label: "Auth via Machine Users",
+ label: "Machine Users",
type: "doc",
id: "2.0/docs/pipelines/installation/viamachineusers",
},
@@ -247,17 +257,17 @@ const sidebar = [
collapsed: false,
items: [
{
- label: "Creating a New GitHub Repository with Pipelines",
+ label: "Bootstrap Pipelines in a New GitHub Repository",
type: "doc",
id: "2.0/docs/pipelines/installation/addingnewrepo",
},
{
- label: "Adding Pipelines to an Existing GitHub Repository",
+ label: "Bootstrap Pipelines in an Existing GitHub Repository",
type: "doc",
id: "2.0/docs/pipelines/installation/addingexistingrepo",
},
{
- label: "Adding Branch Protection to a Repository",
+ label: "Adding Branch Protection to a GitHub Repository",
type: "doc",
id: "2.0/docs/pipelines/installation/branch-protection",
},
@@ -269,10 +279,20 @@ const sidebar = [
collapsed: false,
items: [
{
- label: "Adding Pipelines to a GitLab Project",
+ label: "Bootstrap Pipelines in a new GitLab Project",
type: "doc",
id: "2.0/docs/pipelines/installation/addinggitlabrepo",
},
+ {
+ label: "Bootstrap Pipelines in an Existing GitLab Project",
+ type: "doc",
+ id: "2.0/docs/pipelines/installation/addingexistinggitlabrepo",
+ },
+ {
+ label: "Adding Branch Protection to a GitLab Project",
+ type: "doc",
+ id: "2.0/docs/pipelines/installation/gitlab-branch-protection",
+ },
],
},
],
@@ -363,11 +383,6 @@ const sidebar = [
type: "doc",
id: "2.0/docs/pipelines/guides/terragrunt-env-vars",
},
- {
- label: "Setup a Delegated Repository",
- type: "doc",
- id: "2.0/docs/pipelines/guides/setup-delegated-repo",
- },
{
label: "Handling Broken IaC",
type: "doc",
@@ -383,6 +398,11 @@ const sidebar = [
type: "doc",
id: "2.0/docs/pipelines/guides/ignore-files-directories",
},
+ {
+ label: "Unlocking State Locks",
+ type: "doc",
+ id: "2.0/docs/pipelines/guides/unlock"
+ }
],
},
{
@@ -453,12 +473,40 @@ const sidebar = [
type: "doc",
id: "2.0/docs/accountfactory/architecture/network-topology",
},
+ {
+ label: "Repository Topology",
+ type: "doc",
+ id: "2.0/docs/accountfactory/architecture/repository-topology",
+ },
+ ],
+ },
+ {
+ label: "Prerequisites",
+ type: "category",
+ collapsed: false,
+ items: [
+ {
+ label: "AWS Landing Zone",
+ type: "doc",
+ id: "2.0/docs/accountfactory/prerequisites/awslandingzone",
+ },
],
},
{
label: "Setup & Installation",
- type: "doc",
- id: "2.0/docs/accountfactory/installation/index",
+ type: "category",
+ collapsed: true,
+ link: {
+ type: "doc",
+ id: "2.0/docs/accountfactory/installation/index",
+ },
+ items: [
+ {
+ label: "Adding Account Factory to a new repository",
+ type: "doc",
+ id: "2.0/docs/accountfactory/installation/addingnewrepo",
+ },
+ ],
},
{
label: "Guides",
@@ -485,6 +533,11 @@ const sidebar = [
type: "doc",
id: "2.0/docs/accountfactory/guides/delegated-repositories",
},
+ {
+ label: "Setup a Delegated Repository",
+ type: "doc",
+ id: "2.0/docs/accountfactory/guides/setup-delegated-repo",
+ },
{
label: "Adding Collaborators to Delegated Repositories",
type: "doc",
diff --git a/sidebars/reference.js b/sidebars/reference.js
index af2fdc3bee..11bc08e1f8 100644
--- a/sidebars/reference.js
+++ b/sidebars/reference.js
@@ -78,11 +78,21 @@ const sidebar = [
value: "Account Factory",
className: "sidebar-header",
},
+ {
+ label: "Overview",
+ type: "doc",
+ id: "2.0/reference/accountfactory/index",
+ },
{
label: "Configurations",
type: "doc",
id: "2.0/reference/accountfactory/configurations",
},
+ {
+ label: "Configurations as Code (HCL - Beta)",
+ type: "doc",
+ id: "2.0/reference/accountfactory/configurations-as-code",
+ },
]
module.exports = sidebar
diff --git a/src/components/HclListItem.tsx b/src/components/HclListItem.tsx
index 00fc85dc49..97e45cb222 100644
--- a/src/components/HclListItem.tsx
+++ b/src/components/HclListItem.tsx
@@ -1,6 +1,6 @@
import React, { PropsWithChildren } from "react"
import styles from "./HclListItem.module.css"
-import useBrokenLinks from '@docusaurus/useBrokenLinks';
+import useBrokenLinks from "@docusaurus/useBrokenLinks"
interface HclListItemProps {
name: string
@@ -11,7 +11,7 @@ interface HclListItemProps {
export const HclListItem: React.FunctionComponent<
PropsWithChildren
> = ({ name, requirement, type, children }) => {
- useBrokenLinks().collectAnchor(name);
+ useBrokenLinks().collectAnchor(name)
return (