diff --git a/.github/workflows/pr_dispatch.yaml b/.github/workflows/pr_lint.yaml similarity index 61% rename from .github/workflows/pr_dispatch.yaml rename to .github/workflows/pr_lint.yaml index b5353fb82d..cdf8b68fef 100644 --- a/.github/workflows/pr_dispatch.yaml +++ b/.github/workflows/pr_lint.yaml @@ -3,15 +3,17 @@ # Licensed under the MIT License. # -name: PR Dispatch Workflow +name: PR Tflint on: + workflow_dispatch: pull_request: types: [opened, synchronize, reopened] env: TF_VERSION: "1.8.4" TF_LINT_VERSION: "v0.50.3" + jobs: linting: name: Format and Lint Checks @@ -35,7 +37,6 @@ jobs: with: tflint_version: ${{ env.TF_LINT_VERSION }} - - name: Run TFLint with reviewdog uses: reviewdog/action-tflint@v1 with: @@ -43,21 +44,3 @@ jobs: reporter: github-pr-check level: info tflint_init: true - - dispatch: - runs-on: ubuntu-latest - strategy: - matrix: - scenario: - - standalone-scenarios-azuread.json - - standalone-scenarios.json - - standalone-compute.json - - standalone-networking.json - - standalone-scenarios-longrunners.json - - steps: - - name: Repository Dispatch - uses: peter-evans/repository-dispatch@v3 - with: - event-type: pr-${{ matrix.scenario }} - client-payload: '{"scenario": "${{ (matrix.scenario) }}", "sha": "${{ github.event.pull_request.head.sha }}"}' diff --git a/.github/workflows/pr_tests-azuread.yaml b/.github/workflows/pr_tests-azuread.yaml new file mode 100644 index 0000000000..4b82311ca0 --- /dev/null +++ b/.github/workflows/pr_tests-azuread.yaml @@ -0,0 +1,96 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. +# + +name: PR azuread-tests + +on: + workflow_dispatch: + pull_request: + types: [opened, synchronize, reopened] + paths: + - 'azuread*' + - 'modules/azuread/**' + - 'examples/azuread/**' + - '.github/workflows/*azuread.*' + +env: + SCENARIO: standalone-azuread.json + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + load_scenarios: + name: Load Test Scenarios Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.load_scenarios.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - id: load_scenarios + run: | + cases=$(cat ./.github/workflows/${{ env.SCENARIO }} | jq -c .) 
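+          # jq -c compacts the scenario file onto one line so the matrix survives
+          # GITHUB_OUTPUT's one-line key=value protocol; the mock_plan_scenarios job
+          # below parses it back with fromJSON(). A hypothetical guard (not part of
+          # this change) could fail fast on a missing or empty scenario file:
+          #   jq -e '.config_files | length > 0' ./.github/workflows/${{ env.SCENARIO }} >/dev/null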
+ echo "matrix=${cases}" >> $GITHUB_OUTPUT + + mock_plan_scenarios: + name: ${{ matrix.config_files }} + runs-on: ubuntu-latest + needs: load_scenarios + + strategy: + fail-fast: false + matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create environment variables + run: | + cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} + FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) + echo STATE_FILE=${HOME}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV + echo PLAN_FILE=${HOME}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV + echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV + echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Configure Terraform plugin cache + run: | + echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >>"$GITHUB_ENV" + mkdir --parents "$HOME/.terraform.d/plugin-cache" + + - name: Cache Terraform + uses: actions/cache@v4 + with: + path: | + ~/.terraform.d/plugin-cache + key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} + restore-keys: | + ${{ runner.os }}-terraform- + + - name: Terraform Init example + id: tf_init + run: | + terraform -chdir=examples \ + init + + - name: Terraform Test example + id: tf_test + run: | + terraform -chdir=examples \ + test \ + -test-directory=./tests/mock \ + ${{ env.PARAMETER_FILES }} \ + -verbose \ No newline at end of file diff --git a/.github/workflows/pr_tests-compute.yaml b/.github/workflows/pr_tests-compute.yaml new file mode 100644 index 0000000000..7b4678b323 --- /dev/null +++ b/.github/workflows/pr_tests-compute.yaml @@ -0,0 +1,96 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. +# + +name: PR compute-tests + +on: + workflow_dispatch: + pull_request: + types: [opened, synchronize, reopened] + paths: + - 'compute_*' + - 'modules/compute/**' + - 'examples/compute/**' + - '.github/workflows/*compute.*' + +env: + SCENARIO: standalone-compute.json + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + load_scenarios: + name: Load Test Scenarios Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.load_scenarios.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - id: load_scenarios + run: | + cases=$(cat ./.github/workflows/${{ env.SCENARIO }} | jq -c .) 
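+          # The scenario file has the shape {"config_files": ["compute/...", ...]},
+          # so the compacted JSON becomes the whole strategy.matrix object below and
+          # each array entry fans out into one mock-plan job named after
+          # matrix.config_files.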
+ echo "matrix=${cases}" >> $GITHUB_OUTPUT + + mock_plan_scenarios: + name: ${{ matrix.config_files }} + runs-on: ubuntu-latest + needs: load_scenarios + + strategy: + fail-fast: false + matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create environment variables + run: | + cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} + FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) + echo STATE_FILE=${HOME}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV + echo PLAN_FILE=${HOME}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV + echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV + echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Configure Terraform plugin cache + run: | + echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >>"$GITHUB_ENV" + mkdir --parents "$HOME/.terraform.d/plugin-cache" + + - name: Cache Terraform + uses: actions/cache@v4 + with: + path: | + ~/.terraform.d/plugin-cache + key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} + restore-keys: | + ${{ runner.os }}-terraform- + + - name: Terraform Init example + id: tf_init + run: | + terraform -chdir=examples \ + init + + - name: Terraform Test example + id: tf_test + run: | + terraform -chdir=examples \ + test \ + -test-directory=./tests/mock \ + ${{ env.PARAMETER_FILES }} \ + -verbose \ No newline at end of file diff --git a/.github/workflows/pr_tests-dataplat.yaml b/.github/workflows/pr_tests-dataplat.yaml new file mode 100644 index 0000000000..6c1ed84706 --- /dev/null +++ b/.github/workflows/pr_tests-dataplat.yaml @@ -0,0 +1,102 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. +# + +name: PR dataplat-tests + +on: + workflow_dispatch: + pull_request: + types: [opened, synchronize, reopened] + paths: + - 'mssql*' + - 'modules/analytics/**' + - 'modules/databases/**' + - 'modules/data_factory/**' + - 'modules/purview/**' + - 'examples/analytics/**' + - 'examples/databases/**' + - 'examples/data_factory/**' + - 'examples/purview/**' + - '.github/workflows/*dataplat.*' + +env: + SCENARIO: standalone-dataplat.json + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + load_scenarios: + name: Load Test Scenarios Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.load_scenarios.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - id: load_scenarios + run: | + cases=$(cat ./.github/workflows/${{ env.SCENARIO }} | jq -c .) 
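+          # Note on the "Create environment variables" step further down: in
+          # sed 's./..g' the delimiter is '.', so the command reads as s/\///g -
+          # it deletes every '/' in the config path (cosmos_db/100-simple-cosmos-db-sql
+          # becomes cosmos_db100-simple-cosmos-db-sql), producing a flat state/plan
+          # file name; the trailing xargs merely trims whitespace.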
+ echo "matrix=${cases}" >> $GITHUB_OUTPUT + + mock_plan_scenarios: + name: ${{ matrix.config_files }} + runs-on: ubuntu-latest + needs: load_scenarios + + strategy: + fail-fast: false + matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create environment variables + run: | + cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} + FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) + echo STATE_FILE=${HOME}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV + echo PLAN_FILE=${HOME}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV + echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV + echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Configure Terraform plugin cache + run: | + echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >>"$GITHUB_ENV" + mkdir --parents "$HOME/.terraform.d/plugin-cache" + + - name: Cache Terraform + uses: actions/cache@v4 + with: + path: | + ~/.terraform.d/plugin-cache + key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} + restore-keys: | + ${{ runner.os }}-terraform- + + - name: Terraform Init example + id: tf_init + run: | + terraform -chdir=examples \ + init + + - name: Terraform Test example + id: tf_test + run: | + terraform -chdir=examples \ + test \ + -test-directory=./tests/mock \ + ${{ env.PARAMETER_FILES }} \ + -verbose \ No newline at end of file diff --git a/.github/workflows/pr_tests-networking.yaml b/.github/workflows/pr_tests-networking.yaml new file mode 100644 index 0000000000..0f8579c651 --- /dev/null +++ b/.github/workflows/pr_tests-networking.yaml @@ -0,0 +1,96 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. +# + +name: PR networking-tests + +on: + workflow_dispatch: + pull_request: + types: [opened, synchronize, reopened] + paths: + - 'network*' + - 'modules/networking/**' + - 'examples/networking/**' + - '.github/workflows/*networking.*' + +env: + SCENARIO: standalone-networking.json + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + load_scenarios: + name: Load Test Scenarios Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.load_scenarios.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - id: load_scenarios + run: | + cases=$(cat ./.github/workflows/${{ env.SCENARIO }} | jq -c .) 
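+          # The TF_PLUGIN_CACHE_DIR + actions/cache pairing further down keys the
+          # provider cache on hashFiles('**/.terraform.lock.hcl'), so matrix jobs
+          # re-download the azurerm/azuread providers only when the lock file
+          # changes; the "<os>-terraform-" restore-key gives a stale-but-warm
+          # fallback cache on a miss.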
+ echo "matrix=${cases}" >> $GITHUB_OUTPUT + + mock_plan_scenarios: + name: ${{ matrix.config_files }} + runs-on: ubuntu-latest + needs: load_scenarios + + strategy: + fail-fast: false + matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create environment variables + run: | + cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} + FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) + echo STATE_FILE=${HOME}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV + echo PLAN_FILE=${HOME}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV + echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV + echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Configure Terraform plugin cache + run: | + echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >>"$GITHUB_ENV" + mkdir --parents "$HOME/.terraform.d/plugin-cache" + + - name: Cache Terraform + uses: actions/cache@v4 + with: + path: | + ~/.terraform.d/plugin-cache + key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} + restore-keys: | + ${{ runner.os }}-terraform- + + - name: Terraform Init example + id: tf_init + run: | + terraform -chdir=examples \ + init + + - name: Terraform Test example + id: tf_test + run: | + terraform -chdir=examples \ + test \ + -test-directory=./tests/mock \ + ${{ env.PARAMETER_FILES }} \ + -verbose \ No newline at end of file diff --git a/.github/workflows/pr_tests-scenarios.yaml b/.github/workflows/pr_tests-scenarios.yaml new file mode 100644 index 0000000000..5c1f2c2f55 --- /dev/null +++ b/.github/workflows/pr_tests-scenarios.yaml @@ -0,0 +1,92 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. +# + +name: PR all-tests + +on: + workflow_dispatch: + pull_request: + types: [opened, synchronize, reopened] + +env: + SCENARIO: standalone-scenarios.json + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + load_scenarios: + name: Load Test Scenarios Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.load_scenarios.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + + - id: load_scenarios + run: | + cases=$(cat ./.github/workflows/${{ env.SCENARIO }} | jq -c .) 
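+          # The "Terraform Test example" step at the end runs
+          # `terraform test -test-directory=./tests/mock`, i.e. *.tftest.hcl files
+          # which (on Terraform >= 1.7) can stub providers via mock_provider blocks -
+          # which is why these PR jobs never log in to Azure, unlike the integration
+          # workflow's apply/destroy jobs.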
+ echo "matrix=${cases}" >> $GITHUB_OUTPUT + + mock_plan_scenarios: + name: ${{ matrix.config_files }} + runs-on: ubuntu-latest + needs: load_scenarios + + strategy: + fail-fast: false + matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create environment variables + run: | + cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} + FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) + echo STATE_FILE=${HOME}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV + echo PLAN_FILE=${HOME}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV + echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV + echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV + + - name: Install Node + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Configure Terraform plugin cache + run: | + echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >>"$GITHUB_ENV" + mkdir --parents "$HOME/.terraform.d/plugin-cache" + + - name: Cache Terraform + uses: actions/cache@v4 + with: + path: | + ~/.terraform.d/plugin-cache + key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} + restore-keys: | + ${{ runner.os }}-terraform- + + - name: Terraform Init example + id: tf_init + run: | + terraform -chdir=examples \ + init + + - name: Terraform Test example + id: tf_test + run: | + terraform -chdir=examples \ + test \ + -test-directory=./tests/mock \ + ${{ env.PARAMETER_FILES }} \ + -verbose \ No newline at end of file diff --git a/.github/workflows/standalone-scenarios-azuread.json b/.github/workflows/standalone-azuread.json similarity index 100% rename from .github/workflows/standalone-scenarios-azuread.json rename to .github/workflows/standalone-azuread.json diff --git a/.github/workflows/standalone-compute.json b/.github/workflows/standalone-compute.json index 0ef269d56b..a1effb7e50 100644 --- a/.github/workflows/standalone-compute.json +++ b/.github/workflows/standalone-compute.json @@ -2,9 +2,9 @@ "config_files": [ "compute/availability_set/100-simple-availabilityset", "compute/availability_set/101-availabilityset-with-proximity-placement-group", - "compute/azure_virtual_desktop/wvd_resources", "compute/azure_redhat_openshift/101_basic_private_cluster", "compute/azure_redhat_openshift/102_basic_public_cluster", + "compute/azure_virtual_desktop/wvd_resources", "compute/batch/batch_account/100-batch-account-storage", "compute/batch/batch_account/100-batch-account", "compute/batch/batch_account/200-batch-account-private-endpoint", @@ -49,6 +49,7 @@ "compute/virtual_machine/214-vm-generic_extensions_complex", "compute/virtual_machine/215-vm-keyvault-for-windows-extension", "compute/virtual_machine/216-vm-linux_diagnostic_extensions", - "compute/virtual_machine/217-vm-disk-encryption-set-msi" + "compute/virtual_machine/217-vm-disk-encryption-set-msi", + "compute/vmware_cluster/101-vmware_cluster" ] } diff --git a/.github/workflows/standalone-compute.yaml b/.github/workflows/standalone-compute.yaml deleted file mode 100644 index 17696aa0e1..0000000000 --- a/.github/workflows/standalone-compute.yaml +++ /dev/null @@ -1,152 +0,0 @@ -# -# Copyright (c) Microsoft Corporation -# Licensed under the MIT License. 
-# - -name: standalone-compute - -on: - push: - paths: - - 'compute_*' - - 'modules/compute/**' - - 'examples/compute/**' - - '.github/workflows/*compute.*' - -env: - TF_CLI_ARGS: "-no-color" - TF_CLI_ARGS_destroy: "-auto-approve -refresh=false" - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} - ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} - ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} - TF_REGISTRY_DISCOVERY_RETRY: 5 - TF_REGISTRY_CLIENT_TIMEOUT: 15 - ROVER_RUNNER: true - -jobs: - load_scenarios: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.load_scenarios.outputs.matrix }} - steps: - - uses: actions/checkout@v4 - - id: load_scenarios - run: | - cases=$(cat ./.github/workflows/standalone-compute.json | jq -c .) - echo "matrix=${cases}" >> $GITHUB_OUTPUT - - testcases: - name: test - runs-on: ubuntu-latest - needs: load_scenarios - - strategy: - fail-fast: false - matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} - - container: - image: aztfmod/rover:1.8.4-2405.2306 - options: --user 0 - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Create environment variables - run: | - cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} - FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) - echo STATE_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV - echo PLAN_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV - echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV - echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Terraform Init example - id: tf_init - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - init -upgrade=true | grep -P '^- (?=Downloading|Using|Finding|Installing)|^[^-]' - - - name: Terraform Plan example - id: tf_plan - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }} - - - name: Terraform Apply example - id: tf_apply - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -parallelism=30 \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }} - - - name: Terraform Destroy planning example - id: tf_destroy_plan - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -destroy \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }}-destroy - - - name: Terraform Destroy apply example - id: tf_destroy_apply - if: steps.tf_destroy_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -refresh=false \ - -parallelism=30 \ - -auto-approve \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }}-destroy - - purge: - name: purge - runs-on: ubuntu-latest - if: ${{ failure() || cancelled() }} - - needs: 
[testcases] - - container: - image: aztfmod/rover:1.8.4-2405.2306 - options: --user 0 - - steps: - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Complete purge - run: | - for i in `az monitor diagnostic-settings subscription list -o tsv --query "value[?contains(name, '${{ github.run_id }}' )].name"`; do echo "purging subscription diagnostic-settings: $i" && $(az monitor diagnostic-settings subscription delete --name $i --yes); done - for i in `az monitor log-profiles list -o tsv --query '[].name'`; do az monitor log-profiles delete --name $i; done - for i in `az ad group list --query "[?contains(displayName, '${{ github.run_id }}')].id" -o tsv`; do echo "purging Azure AD group: $i" && $(az ad group delete --verbose --group $i || true); done - for i in `az ad app list --query "[?contains(displayName, '${{ github.run_id }}')].appId" -o tsv`; do echo "purging Azure AD app: $i" && $(az ad app delete --verbose --id $i || true); done - for i in `az keyvault list-deleted --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do az keyvault purge --name $i; done - for i in `az group list --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do echo "purging resource group: $i" && $(az group delete -n $i -y --no-wait || true); done - for i in `az role assignment list --query "[?contains(roleDefinitionName, '${{ github.run_id }}')].roleDefinitionName" -o tsv`; do echo "purging role assignment: $i" && $(az role assignment delete --role $i || true); done - for i in `az role definition list --query "[?contains(roleName, '${{ github.run_id }}')].roleName" -o tsv`; do echo "purging custom role definition: $i" && $(az role definition delete --name $i || true); done diff --git a/.github/workflows/standalone-dataplat.json b/.github/workflows/standalone-dataplat.json new file mode 100644 index 0000000000..1a1a565a55 --- /dev/null +++ b/.github/workflows/standalone-dataplat.json @@ -0,0 +1,76 @@ +{ + "config_files": [ + "cosmos_db/100-cosmos-db-sql-role-mapping", + "cosmos_db/100-simple-cosmos-db-cassandra", + "cosmos_db/100-simple-cosmos-db-gremlin", + "cosmos_db/100-simple-cosmos-db-mongo", + "cosmos_db/100-simple-cosmos-db-sql", + "cosmos_db/100-simple-cosmos-db-table", + "cosmos_db/101-decomposed-cosmosdb-sql", + "cosmos_db/101-private-endpoint-cosmos-db", + "data_explorer/101-kusto_clusters_basic", + "data_explorer/102-kusto_clusters_vnet", + "data_explorer/103-kusto_clusters_identity", + "data_explorer/104-kusto_cluster_database", + "data_explorer/105-kusto_attached_database_configuration", + "data_explorer/106-database_principal_assignment", + "data_explorer/107-private-endpoint", + "data_factory/101-data_factory", + "data_factory/102-data_factory_pipeline", + "data_factory/103-data_factory_trigger_schedule", + "data_factory/104-data_factory_dataset_azure_blob", + "data_factory/105-data_factory_dataset_cosmosdb_sqlapi", + "data_factory/106-data_factory_dataset_delimited_text", + "data_factory/107-data_factory_dataset_http", + "data_factory/108-data_factory_dataset_json", + "data_factory/109-data_factory_dataset_mysql", + "data_factory/110-data_factory_dataset_postgresql", + "data_factory/111-data_factory_dataset_sql_server_table", + "data_factory/112-data_factory_integration_runtime_azure_ssis", + "data_factory/113-data_factory_integration_runtime_azure_ssis_mssql_server", + 
"data_factory/114-data_factory_integration_runtime_self_hosted", + "data_factory/115-data_factory_runtime_self_hoste_databricks", + "data_factory/116-data_factory_linked_service_azure_databricks", + "data_protection/100-backup-vault-blob-storage", + "data_protection/101-backup-vault-disk", + "database_migration_services/100-dms", + "databricks/100-standard-databricks-no-vnet", + "databricks/101-standard-databricks-vnet", + "databricks/102-premium-aml", + "databricks/102-premium-databricks-vnet-private-endpoint", + "datalake/101-datalake-storage", + "machine_learning/100-aml", + "machine_learning/101-aml-vnet", + "machine_learning/102-aml-compute_instance", + "mariadb_server/100-simple-mariadb", + "mariadb_server/101-vnet-rule-mariadb", + "mariadb_server/102-private-endpoint-mariadb", + "mariadb_server/103-private-endpoint-with-fw-rule-mariadb", + "mssql_mi/200-mi", + "mssql_server/101-sqlserver-simple", + "mssql_server/102-sqlserver-extend", + "mssql_server/104-sqlserver-elastic_pools", + "mssql_server/105-sqlserver-failover_groups", + "mssql_server/107-sqlserver-db-retention-policy", + "mssql_server/108-sqlserver-db-diagnostics", + "mssql_server/109-sqlserver-network-firewall-rule", + "mysql_flexible_server/100-simple-mysql-flexible", + "mysql_flexible_server/101-delegated-subnet-with-fw-rule", + "mysql_flexible_server/102-advanced-mysql-flexible", + "mysql_server/100-simple-mysql", + "mysql_server/101-vnet-rule-mysql", + "mysql_server/102-private-endpoint-mysql", + "mysql_server/103-private-endpoint-with-fw-rule-mysql", + "postgresql_flexible_server/100-simple-postgresql-flexible", + "postgresql_flexible_server/101-delegated-subnet-with-fw-rule", + "postgresql_flexible_server/102-advanced-postgresql-flexible", + "postgresql_flexible_server/104-private-endpoint", + "postgresql_server/100-simple-postgresql", + "postgresql_server/101-vnet-rule-postgresql", + "postgresql_server/102-private-endpoint-postgresql", + "postgresql_server/103-private-endpoint-with-fw-rule", + "powerbi_embedded/100-simple-powerbi", + "purview/100-purview_account", + "purview/101-purview_account_private_link" + ] +} diff --git a/.github/workflows/standalone-networking.json b/.github/workflows/standalone-networking.json index c1a3a787c9..0697afeee1 100644 --- a/.github/workflows/standalone-networking.json +++ b/.github/workflows/standalone-networking.json @@ -36,6 +36,10 @@ "networking/private_dns/100-private-dns-vnet-links", "networking/private_links/endpoints/centralized", "networking/private_links/endpoints/static_ip", + "networking/virtual_network_gateway/100-expressroute-gateway", + "networking/virtual_network_gateway/101-vpn-site-to-site", + "networking/virtual_network_gateway/102-vpn-site-to-site-active-active", + "networking/virtual_network_gateway/103-vpn-site-to-site-connection", "networking/virtual_network/100-import-rg", "networking/virtual_network/100-simple-vnet-subnets-nsgs", "networking/virtual_network/100-subnet-delegation", @@ -44,7 +48,16 @@ "networking/virtual_network/201-nsg-flow-logs-v1", "networking/virtual_subnets/100-simple-subnet-rbac", "networking/virtual_wan/100-vwan-multi-hubs", + "networking/virtual_wan/101-vwan-hub-firewall-legacy", + "networking/virtual_wan/102-vwan-hub-firewall-secured-vhub", + "networking/virtual_wan/102a-vwan-hub-firewall-secured-vhub-fw-mgr", + "networking/virtual_wan/103-vwan-hub-gw-legacy", + "networking/virtual_wan/104-vwan-hub-gw-spp", + "networking/virtual_wan/105-vwan-hub-route-table", "networking/virtual_wan/106-vwan-hub-routes", - 
"networking/virtual_wan/108-vwan-vpn-site" + "networking/virtual_wan/108-vwan-vpn-site", + "networking/virtual_wan/109-vwan-vpn-gateway-connection", + "networking/virtual_wan/110-vwan-hub-gw-p2s-keyvault-cert", + "networking/virtual_wan/111-vwan-vpn-gateway-connection-with-nat" ] } diff --git a/.github/workflows/standalone-networking.yaml b/.github/workflows/standalone-networking.yaml deleted file mode 100644 index 3ae0243874..0000000000 --- a/.github/workflows/standalone-networking.yaml +++ /dev/null @@ -1,152 +0,0 @@ -# -# Copyright (c) Microsoft Corporation -# Licensed under the MIT License. -# - -name: standalone-networking - -on: - push: - paths: - - 'network*' - - 'modules/networking/**' - - 'examples/networking/**' - - '.github/workflows/*networking.*' - -env: - TF_CLI_ARGS: "-no-color" - TF_CLI_ARGS_destroy: "-auto-approve -refresh=false" - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} - ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} - ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} - TF_REGISTRY_DISCOVERY_RETRY: 5 - TF_REGISTRY_CLIENT_TIMEOUT: 15 - ROVER_RUNNER: true - -jobs: - load_scenarios: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.load_scenarios.outputs.matrix }} - steps: - - uses: actions/checkout@v4 - - id: load_scenarios - run: | - cases=$(cat ./.github/workflows/standalone-networking.json | jq -c .) - echo "matrix=${cases}" >> $GITHUB_OUTPUT - - testcases: - name: test - runs-on: ubuntu-latest - needs: load_scenarios - - strategy: - fail-fast: false - matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} - - container: - image: aztfmod/rover:1.8.4-2405.2306 - options: --user 0 - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Create environment variables - run: | - cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} - FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) - echo STATE_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV - echo PLAN_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV - echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV - echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Terraform Init example - id: tf_init - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - init -upgrade=true | grep -P '^- (?=Downloading|Using|Finding|Installing)|^[^-]' - - - name: Terraform Plan example - id: tf_plan - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }} - - - name: Terraform Apply example - id: tf_apply - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -parallelism=30 \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }} - - - name: Terraform Destroy planning example - id: tf_destroy_plan - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var 
tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -destroy \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }}-destroy - - - name: Terraform Destroy apply example - id: tf_destroy_apply - if: steps.tf_destroy_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -refresh=false \ - -parallelism=30 \ - -auto-approve \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }}-destroy - - purge: - name: purge - runs-on: ubuntu-latest - if: ${{ failure() || cancelled() }} - - needs: [testcases] - - container: - image: aztfmod/rover:1.8.0-2405.0203 - options: --user 0 - - steps: - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Complete purge - run: | - for i in `az monitor diagnostic-settings subscription list -o tsv --query "value[?contains(name, '${{ github.run_id }}' )].name"`; do echo "purging subscription diagnostic-settings: $i" && $(az monitor diagnostic-settings subscription delete --name $i --yes); done - for i in `az monitor log-profiles list -o tsv --query '[].name'`; do az monitor log-profiles delete --name $i; done - for i in `az ad group list --query "[?contains(displayName, '${{ github.run_id }}')].id" -o tsv`; do echo "purging Azure AD group: $i" && $(az ad group delete --verbose --group $i || true); done - for i in `az ad app list --query "[?contains(displayName, '${{ github.run_id }}')].appId" -o tsv`; do echo "purging Azure AD app: $i" && $(az ad app delete --verbose --id $i || true); done - for i in `az keyvault list-deleted --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do az keyvault purge --name $i; done - for i in `az group list --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do echo "purging resource group: $i" && $(az group delete -n $i -y --no-wait || true); done - for i in `az role assignment list --query "[?contains(roleDefinitionName, '${{ github.run_id }}')].roleDefinitionName" -o tsv`; do echo "purging role assignment: $i" && $(az role assignment delete --role $i || true); done - for i in `az role definition list --query "[?contains(roleName, '${{ github.run_id }}')].roleName" -o tsv`; do echo "purging custom role definition: $i" && $(az role definition delete --name $i || true); done diff --git a/.github/workflows/standalone-regressor-tf100.yaml b/.github/workflows/standalone-regressor-tf100.yaml index 22a66c6458..60bf4d639b 100644 --- a/.github/workflows/standalone-regressor-tf100.yaml +++ b/.github/workflows/standalone-regressor-tf100.yaml @@ -18,11 +18,10 @@ on: type: choice default: 'standalone-scenarios.json' options: - - standalone-scenarios-azuread.json + - standalone-azuread.json - standalone-scenarios.json - standalone-compute.json - standalone-networking.json - - standalone-scenarios-longrunners.json env: TF_CLI_ARGS: '-no-color' diff --git a/.github/workflows/standalone-scenarios-additional.json b/.github/workflows/standalone-scenarios-additional.json index 2e0450829d..5c141c24bd 100644 --- a/.github/workflows/standalone-scenarios-additional.json +++ b/.github/workflows/standalone-scenarios-additional.json @@ -14,11 +14,6 @@ "consumption_budget/105-consumption-budget-subscription-aks", "messaging/signalr/100-signalr-simple", "mssql_mi/200-mi-two-regions", - 
"networking/virtual_network_gateway/100-expressroute-gateway", - "networking/virtual_network_gateway/101-vpn-site-to-site", - "networking/virtual_network_gateway/102-vpn-site-to-site-active-active", - "networking/virtual_network_gateway/103-vpn-site-to-site-connection", - "networking/virtual_wan/100-vwan-multi-hubs", "networking/virtual_wan/101-vwan-hub-firewall-legacy", "networking/virtual_wan/102-vwan-hub-firewall-new", "networking/virtual_wan/103-vwan-hub-gw", diff --git a/.github/workflows/standalone-scenarios-longrunners.json b/.github/workflows/standalone-scenarios-longrunners.json deleted file mode 100644 index 6bb91b665d..0000000000 --- a/.github/workflows/standalone-scenarios-longrunners.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "config_files": [ - "apim/100-basic", - "apim/101-api", - "apim/102-diagnostics", - "apim/103-api_operations", - "apim/104-backend", - "apim/105-api_policy", - "apim/106-api_operation_tag", - "apim/107-api_operation_policy", - "apim/108-api_management_user", - "apim/110-api_management_diagnostic", - "apim/111-api_management_certificate", - "apim/112-api_management_gateway", - "apim/113-api_management_gateway_api", - "apim/114-api-management-group", - "apim/115-api_management_private_virtual_network", - "apim/116-api_management_subscription", - "apim/117-api_management_product", - "apim/118-api_management_platform_stv2", - "app_gateway/301-agw-v1", - "compute/vmware_cluster/101-vmware_cluster", - "mssql_mi/200-mi", - "networking/virtual_network_gateway/100-expressroute-gateway", - "networking/virtual_network_gateway/101-vpn-site-to-site", - "networking/virtual_network_gateway/102-vpn-site-to-site-active-active", - "networking/virtual_network_gateway/103-vpn-site-to-site-connection", - "networking/virtual_wan/101-vwan-hub-firewall-legacy", - "networking/virtual_wan/102-vwan-hub-firewall-secured-vhub", - "networking/virtual_wan/102a-vwan-hub-firewall-secured-vhub-fw-mgr", - "networking/virtual_wan/103-vwan-hub-gw-legacy", - "networking/virtual_wan/104-vwan-hub-gw-spp", - "networking/virtual_wan/105-vwan-hub-route-table", - "networking/virtual_wan/109-vwan-vpn-gateway-connection", - "networking/virtual_wan/110-vwan-hub-gw-p2s-keyvault-cert", - "networking/virtual_wan/111-vwan-vpn-gateway-connection-with-nat", - "redis_cache/100-redis-standard", - "redis_cache/101-redis-diagnostics", - "redis_cache/102-redis-private", - "webapps/appservice-environment/102-simple_asev3" - ] -} diff --git a/.github/workflows/standalone-scenarios.json b/.github/workflows/standalone-scenarios.json index a7f95055b5..36d5c48490 100644 --- a/.github/workflows/standalone-scenarios.json +++ b/.github/workflows/standalone-scenarios.json @@ -1,5 +1,23 @@ { "config_files": [ + "apim/100-basic", + "apim/101-api", + "apim/102-diagnostics", + "apim/103-api_operations", + "apim/104-backend", + "apim/105-api_policy", + "apim/106-api_operation_tag", + "apim/107-api_operation_policy", + "apim/108-api_management_user", + "apim/110-api_management_diagnostic", + "apim/111-api_management_certificate", + "apim/112-api_management_gateway", + "apim/113-api_management_gateway_api", + "apim/114-api-management-group", + "apim/115-api_management_private_virtual_network", + "apim/116-api_management_subscription", + "apim/117-api_management_product", + "apim/118-api_management_platform_stv2", "app_config/100-simple", "app_config/101-private-link", "app_insights/100-all-attributes", @@ -12,69 +30,30 @@ "automation/103-automation-private-endpoints", "automation/104-automation-schedule-runbook", 
"communication/communication_services/101-communication_service", - "cosmos_db/100-simple-cosmos-db-cassandra", - "cosmos_db/100-simple-cosmos-db-gremlin", - "cosmos_db/100-simple-cosmos-db-mongo", - "cosmos_db/100-simple-cosmos-db-sql", - "cosmos_db/100-simple-cosmos-db-table", - "cosmos_db/100-cosmos-db-sql-role-mapping", - "cosmos_db/101-decomposed-cosmosdb-sql", - "cosmos_db/101-private-endpoint-cosmos-db", - "data_explorer/101-kusto_clusters_basic", - "data_explorer/102-kusto_clusters_vnet", - "data_explorer/103-kusto_clusters_identity", - "data_explorer/104-kusto_cluster_database", - "data_explorer/105-kusto_attached_database_configuration", - "data_explorer/106-database_principal_assignment", - "data_explorer/107-private-endpoint", - "data_factory/101-data_factory", - "data_factory/102-data_factory_pipeline", - "data_factory/103-data_factory_trigger_schedule", - "data_factory/104-data_factory_dataset_azure_blob", - "data_factory/105-data_factory_dataset_cosmosdb_sqlapi", - "data_factory/106-data_factory_dataset_delimited_text", - "data_factory/107-data_factory_dataset_http", - "data_factory/108-data_factory_dataset_json", - "data_factory/109-data_factory_dataset_mysql", - "data_factory/110-data_factory_dataset_postgresql", - "data_factory/111-data_factory_dataset_sql_server_table", - "data_factory/112-data_factory_integration_runtime_azure_ssis", - "data_factory/113-data_factory_integration_runtime_azure_ssis_mssql_server", - "data_factory/114-data_factory_integration_runtime_self_hosted", - "data_factory/115-data_factory_runtime_self_hoste_databricks", - "data_factory/116-data_factory_linked_service_azure_databricks", - "data_protection/100-backup-vault-blob-storage", - "data_protection/101-backup-vault-disk", - "database_migration_services/100-dms", - "databricks/100-standard-databricks-no-vnet", - "databricks/101-standard-databricks-vnet", - "databricks/102-premium-aml", - "databricks/102-premium-databricks-vnet-private-endpoint", - "datalake/101-datalake-storage", + "diagnostics_profiles/100-multiple-destinations", "diagnostics_profiles/100-multiple-destinations", "diagnostics_profiles/101-log-analytics-destination-type-profile", "diagnostics_profiles/200-diagnostics-eventhub-namespaces", + "diagnostics_profiles/200-diagnostics-eventhub-namespaces", + "diagnostics_profiles/201-multi-eventhub-diagnostics", "diagnostics_profiles/201-multi-eventhub-diagnostics", "digital_twins/100-basic", "digital_twins/101-adt-servicebus", "digital_twins/102-digital_twins_instance_eventhub", - "diagnostics_profiles/100-multiple-destinations", - "diagnostics_profiles/200-diagnostics-eventhub-namespaces", - "diagnostics_profiles/201-multi-eventhub-diagnostics", "eventhub/100-simple-eventhub-namespace", "eventhub/101-evh-namespace-with-private-endpoint", "eventhub/102-namespace-and-evh-with-auth-rules", "eventhub/103-eventhub-consumer-groups", "eventhub/104-namespace-and-evh-with-storage", - "keyvault/101-keyvault-policies", - "keyvault/102-keyvault-cert-issuer", - "keyvault/104-keyvault-dynamic-secret", - "keyvault/105-keyvault-dynamic-certificate", "iot/100-iot-hub", "iot/101-iot-hub-endpoints-and-file-upload", "iot/103-iot-hub-with-dps", "iot/110-iot-central-application", "iot/111-iot-security-solution", + "keyvault/101-keyvault-policies", + "keyvault/102-keyvault-cert-issuer", + "keyvault/104-keyvault-dynamic-secret", + "keyvault/105-keyvault-dynamic-certificate", "load_test/100-load-test", "logic_app/100-logic_app_workflow", "logic_app/102-logic_app_integration_account", @@ -84,24 +63,18 @@ 
"logic_app/106-logic_app_trigger_recurrence", "logic_app/107-logic_app_trigger_custom", "logic_app/109-logic_app_standard_vnet_integration", - "machine_learning/100-aml", - "machine_learning/101-aml-vnet", - "machine_learning/102-aml-compute_instance", "maintenance_configuration/100-maintenance-configuration", "maintenance_configuration/101-maintenance-configuration-schedule", "maintenance_configuration/200-maintenance-configuration-assignment-vm-windows", "maintenance_configuration/201-maintenance-configuration-assignment-vm-linux", - "maps/101-azure-maps-account", "managed_service_identity/100-msi-levels", - "mariadb_server/100-simple-mariadb", - "mariadb_server/101-vnet-rule-mariadb", - "mariadb_server/102-private-endpoint-mariadb", - "mariadb_server/103-private-endpoint-with-fw-rule-mariadb", + "maps/101-azure-maps-account", "messaging/eventgrid/100-simple-eventgrid-topic", "messaging/eventgrid/101-simple-eventgrid-topic-private-endpoint", "messaging/eventgrid/102-eventgrid_subscription", "messaging/eventgrid/200-simple-eventgrid-domain-topic", "messaging/servicebus/100-servicebus-services", + "messaging/servicebus/200-servicebus-privatelink", "messaging/web_pubsub/100-simple-web-pubsub", "messaging/web_pubsub/101-web-pubsub-hub", "messaging/web_pubsub/102-web-pubsub-usermsi", @@ -111,33 +84,8 @@ "monitoring/102-monitor_activity_log_alert", "monitoring/103-monitor_metric_alert", "monitoring/104-log_analytics_storage_insights", - "mssql_server/101-sqlserver-simple", - "mssql_server/102-sqlserver-extend", - "mssql_server/104-sqlserver-elastic_pools", - "mssql_server/105-sqlserver-failover_groups", - "mssql_server/107-sqlserver-db-retention-policy", - "mssql_server/108-sqlserver-db-diagnostics", - "mssql_server/109-sqlserver-network-firewall-rule", - "mysql_flexible_server/100-simple-mysql-flexible", - "mysql_flexible_server/101-delegated-subnet-with-fw-rule", - "mysql_flexible_server/102-advanced-mysql-flexible", - "mysql_server/100-simple-mysql", - "mysql_server/101-vnet-rule-mysql", - "mysql_server/102-private-endpoint-mysql", - "mysql_server/103-private-endpoint-with-fw-rule-mysql", "netapp/101-nfs", "netapp/102-nfs-export-policy", - "postgresql_flexible_server/100-simple-postgresql-flexible", - "postgresql_flexible_server/101-delegated-subnet-with-fw-rule", - "postgresql_flexible_server/102-advanced-postgresql-flexible", - "postgresql_flexible_server/104-private-endpoint", - "postgresql_server/100-simple-postgresql", - "postgresql_server/101-vnet-rule-postgresql", - "postgresql_server/102-private-endpoint-postgresql", - "postgresql_server/103-private-endpoint-with-fw-rule", - "powerbi_embedded/100-simple-powerbi", - "purview/100-purview_account", - "purview/101-purview_account_private_link", "recovery_vault/101-simple-asrv", "recovery_vault/102-asr-protection", "recovery_vault/103-asr-with-private-endpoint", @@ -145,6 +93,9 @@ "recovery_vault/105-asr-with-network-mapping", "recovery_vault/106-backupvault-with-sqldatabase-saphana", "recovery_vault/107-asr-diagnostics", + "redis_cache/100-redis-standard", + "redis_cache/101-redis-diagnostics", + "redis_cache/102-redis-private", "redis_cache/103-redis-private-endpoints", "role_mapping/100-simple-role-mapping", "role_mapping/101-function-app-managed-identity", @@ -171,6 +122,7 @@ "storage_container/101-storage_container", "synapse_analytics/100-synapse", "synapse_analytics/101-synapse-sparkpool", + "webapps/appservice-environment/102-simple_asev3", "webapps/appservice/101-appservice-simple", "webapps/appservice/102-appservice-slots", 
"webapps/appservice/103-appservice-extend", diff --git a/.github/workflows/standalone-tf100.yaml b/.github/workflows/standalone-tf100.yaml deleted file mode 100644 index 3ba94fd09e..0000000000 --- a/.github/workflows/standalone-tf100.yaml +++ /dev/null @@ -1,161 +0,0 @@ -# -# Copyright (c) Microsoft Corporation -# Licensed under the MIT License. -# - -name: standalone-tf100 - -on: - workflow_dispatch: - inputs: - scenario: - description: "Select the scenario you want to run:" - required: false - type: choice - default: "standalone-scenarios.json" - options: - - standalone-scenarios-azuread.json - - standalone-scenarios.json - - standalone-compute.json - - standalone-networking.json - - standalone-scenarios-longrunners.json - -env: - TF_CLI_ARGS: "-no-color" - TF_CLI_ARGS_destroy: "-auto-approve -refresh=false" - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} - ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} - ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} - TF_REGISTRY_DISCOVERY_RETRY: 5 - TF_REGISTRY_CLIENT_TIMEOUT: 15 - ROVER_RUNNER: true - -jobs: - load_scenarios: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.load_scenarios.outputs.matrix }} - steps: - - uses: actions/checkout@v4 - - id: load_scenarios - run: | - cases=$(( - cat ./.github/workflows/${{ github.event.inputs.scenario }}) | jq -c .) - echo "matrix=${cases}" >> $GITHUB_OUTPUT - - testcases: - name: test - runs-on: ubuntu-latest - needs: load_scenarios - - strategy: - max-parallel: 20 - fail-fast: false - matrix: ${{fromJSON(needs.load_scenarios.outputs.matrix)}} - - container: - image: aztfmod/rover:1.8.4-2405.2306 - options: --user 0 - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Create environment variables - run: | - cd ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} - FILE_NAME=$(echo ${{ matrix.config_files }} | sed 's./..g' | xargs) - echo STATE_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.tfstate >> $GITHUB_ENV - echo PLAN_FILE=${TF_DATA_DIR}/tfstates/${FILE_NAME}.plan >> $GITHUB_ENV - echo CURRENT_FOLDER=${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} >> $GITHUB_ENV - echo PARAMETER_FILES=$(find ${GITHUB_WORKSPACE}/examples/${{ matrix.config_files }} | grep .tfvars | sed 's/.*/-var-file=&/' | xargs) >> $GITHUB_ENV - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Terraform Init example - id: tf_init - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - init -upgrade=true | grep -P '^- (?=Downloading|Using|Finding|Installing)|^[^-]' - - - name: Terraform Plan example - id: tf_plan - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }} - - - name: Terraform Apply example - id: tf_apply - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -parallelism=30 \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }} - - - name: Terraform Destroy planning example - id: tf_destroy_plan - if: steps.tf_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - plan \ - ${{ env.PARAMETER_FILES }} \ - -var 
tags='{testing_job_id='"${{ github.run_id }}"'}' \ - -var var_folder_path=${{ env.CURRENT_FOLDER }} \ - -refresh=true \ - -input=false \ - -destroy \ - -state=${{ env.STATE_FILE }} \ - -out=${{ env.PLAN_FILE }}-destroy - - - name: Terraform Destroy apply example - id: tf_destroy_apply - if: steps.tf_destroy_plan.outcome == 'success' - run: | - terraform -chdir=${GITHUB_WORKSPACE}/examples \ - apply \ - -refresh=false \ - -parallelism=30 \ - -auto-approve \ - -state=${{ env.STATE_FILE }} \ - ${{ env.PLAN_FILE }}-destroy - - purge: - name: purge - runs-on: ubuntu-latest - if: ${{ failure() || cancelled() }} - - needs: [testcases] - - container: - image: aztfmod/rover:1.8.4-2405.2306 - options: --user 0 - - steps: - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: Complete purge - run: | - for i in `az monitor diagnostic-settings subscription list -o tsv --query "value[?contains(name, '${{ github.run_id }}' )].name"`; do echo "purging subscription diagnostic-settings: $i" && $(az monitor diagnostic-settings subscription delete --name $i --yes); done - for i in `az monitor log-profiles list -o tsv --query '[].name'`; do az monitor log-profiles delete --name $i; done - for i in `az ad group list --query "[?contains(displayName, '${{ github.run_id }}')].id" -o tsv`; do echo "purging Azure AD group: $i" && $(az ad group delete --verbose --group $i || true); done - for i in `az ad app list --query "[?contains(displayName, '${{ github.run_id }}')].appId" -o tsv`; do echo "purging Azure AD app: $i" && $(az ad app delete --verbose --id $i || true); done - for i in `az keyvault list-deleted --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do az keyvault purge --name $i; done - for i in `az group list --query "[?tags.testing_job_id=='${{ github.run_id }}'].name" -o tsv`; do echo "purging resource group: $i" && $(az group delete -n $i -y --no-wait || true); done - for i in `az role assignment list --query "[?contains(roleDefinitionName, '${{ github.run_id }}')].roleDefinitionName" -o tsv`; do echo "purging role assignment: $i" && $(az role assignment delete --role $i || true); done - for i in `az role definition list --query "[?contains(roleName, '${{ github.run_id }}')].roleName" -o tsv`; do echo "purging custom role definition: $i" && $(az role definition delete --name $i || true); done diff --git a/.github/workflows/weekly_dispatch.yaml b/.github/workflows/weekly_dispatch.yaml new file mode 100644 index 0000000000..fc601ff4d7 --- /dev/null +++ b/.github/workflows/weekly_dispatch.yaml @@ -0,0 +1,34 @@ +# +# Copyright (c) Microsoft Corporation +# Licensed under the MIT License. 
+# + +name: PR Dispatch Workflow + +on: + schedule: + - cron: '0 5 * * 5' + +env: + TF_VERSION: "1.8.4" + TF_LINT_VERSION: "v0.50.3" + +jobs: + dispatch: + runs-on: ubuntu-latest + strategy: + matrix: + scenario: + - standalone-scenarios-azuread.json + - standalone-scenarios.json + - standalone-compute.json + - standalone-networking.json + - standalone-dataplat.json + + steps: + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + event-type: int-${{ matrix.scenario }} + client-payload: '{"scenario": "${{ (matrix.scenario) }}", "sha": "${{ github.event.pull_request.head.sha }}"}' diff --git a/.github/workflows/pr_workflow.yaml b/.github/workflows/weekly_workflow.yaml similarity index 96% rename from .github/workflows/pr_workflow.yaml rename to .github/workflows/weekly_workflow.yaml index 67dd50f950..9b645ac6ac 100644 --- a/.github/workflows/pr_workflow.yaml +++ b/.github/workflows/weekly_workflow.yaml @@ -7,7 +7,7 @@ name: PR tests on: repository_dispatch: - types: [pr-*] + types: [int-*] workflow_dispatch: inputs: scenario: @@ -16,14 +16,11 @@ on: type: choice default: "standalone-networking.json" options: - - standalone-scenarios-azuread.json + - standalone-azuread.json - standalone-scenarios.json - standalone-compute.json - standalone-networking.json - - standalone-scenarios-longrunners.json - -permissions: - contents: read + - standalone-dataplat.json env: DEFAULT_SCENARIO: "standalone-networking.json" @@ -38,9 +35,6 @@ jobs: matrix: ${{ steps.load_scenarios.outputs.matrix }} steps: - uses: actions/checkout@v4 - with: - ref: ${{ github.event.client_payload.sha }} - - id: load_scenarios run: | echo "Scenario: ${{ github.event.client_payload.scenario }}" @@ -63,8 +57,6 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - with: - ref: ${{ github.event.client_payload.sha }} - name: Create environment variables run: | @@ -115,6 +107,10 @@ jobs: -verbose terraform_integration_tests: + permissions: + id-token: write + contents: read + name: Integration-${{ matrix.config_files }} runs-on: ubuntu-latest if: always() @@ -130,8 +126,6 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - with: - ref: ${{ github.event.client_payload.sha }} - name: Create environment variables run: | @@ -160,7 +154,7 @@ jobs: key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} restore-keys: | ${{ runner.os }}-terraform- - + - name: Azure Login uses: azure/login@v2 with: @@ -220,6 +214,10 @@ jobs: ${{ env.PLAN_FILE }}-destroy purge: + permissions: + id-token: write + contents: read + name: Purge Integration Environment runs-on: ubuntu-latest if: ${{ failure() || cancelled() }} diff --git a/azuread_users.tf b/azuread_users.tf index f75e57cc13..ac8d3d4860 100644 --- a/azuread_users.tf +++ b/azuread_users.tf @@ -4,7 +4,7 @@ module "azuread_users" { source = "./modules/azuread/users" - depends_on = [module.keyvault_access_policies, time_sleep.azurerm_role_assignment_for.0] + depends_on = [module.keyvault_access_policies, time_sleep.azurerm_role_assignment_for[0]] for_each = local.azuread.azuread_users client_config = local.client_config diff --git a/compute_virtual_machines.tf b/compute_virtual_machines.tf index 9e4f1bd07d..4152a9ecb0 100644 --- a/compute_virtual_machines.tf +++ b/compute_virtual_machines.tf @@ -12,8 +12,9 @@ module "virtual_machines" { module.packer_service_principal, module.proximity_placement_groups, module.storage_account_blobs, - time_sleep.azurerm_role_assignment_for.0 + 
time_sleep.azurerm_role_assignment_for[0] ] + for_each = local.compute.virtual_machines application_security_groups = local.combined_objects_application_security_groups diff --git a/compute_virtual_machines_scale_sets.tf b/compute_virtual_machines_scale_sets.tf index 574860dc77..175e213d33 100644 --- a/compute_virtual_machines_scale_sets.tf +++ b/compute_virtual_machines_scale_sets.tf @@ -13,8 +13,9 @@ module "virtual_machine_scale_sets" { module.packer_build, module.packer_service_principal, module.proximity_placement_groups, - time_sleep.azurerm_role_assignment_for.0 + time_sleep.azurerm_role_assignment_for[0] ] + for_each = local.compute.virtual_machine_scale_sets availability_sets = local.combined_objects_availability_sets diff --git a/examples/apim/118-api_management_platform_stv2/configuration.tfvars b/examples/apim/118-api_management_platform_stv2/configuration.tfvars index 605fed4e7d..59892b21df 100644 --- a/examples/apim/118-api_management_platform_stv2/configuration.tfvars +++ b/examples/apim/118-api_management_platform_stv2/configuration.tfvars @@ -26,20 +26,20 @@ vnets = { region = "region1" vnet = { name = "example-uks" # prefix-vnet-example-uks - address_space = ["10.0.0.0/16"] + address_space = ["10.0.0.0/16"] } subnets = { - + # Example subnet for APIM private endpoint - + snet_example_apim_uks = { - name = "example-apim-uks" #prefix-snet-example-apim-uks - cidr = ["10.0.1.0/24"] - nsg_key = "nsg_example_apim_uks" - # route_table_key = "" + name = "example-apim-uks" #prefix-snet-example-apim-uks + cidr = ["10.0.1.0/24"] + nsg_key = "nsg_example_apim_uks" + # route_table_key = "" service_endpoints = ["Microsoft.KeyVault", "Microsoft.Storage", "Microsoft.Sql", "Microsoft.EventHub", "Microsoft.ServiceBus"] # service endpoints required for APIM } - } + } } } @@ -142,31 +142,31 @@ public_ip_addresses = { api_management = { apim_uks = { - name = "example-uks" # prefix-apim-example-uks - resource_group_key = "rg_example_apim_uks" - publisher_name = "apim.example.sre.com" - publisher_email = "example.apim@sre.com" - sku_name = "Developer_1" # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/api_management#sku_name - region = "region1" + name = "example-uks" # prefix-apim-example-uks + resource_group_key = "rg_example_apim_uks" + publisher_name = "apim.example.sre.com" + publisher_email = "example.apim@sre.com" + sku_name = "Developer_1" # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/api_management#sku_name + region = "region1" # Required to deploy APIM on platform verions stv2.* - public_ip_address = { - key = "pip_apim_uks" - # lz_key = "" - } + public_ip_address = { + key = "pip_apim_uks" + # lz_key = "" + } virtual_network_type = "Internal" # The type of virtual network you want to use, valid values include: None, External, Internal. Defaults to None. 
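    # With virtual_network_type = "Internal" the gateway and portal endpoints are
    # reachable only inside the VNet referenced below; on the stv2 platform the
    # public_ip_address above is still required, but it is used solely for Azure
    # management traffic.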
virtual_network_configuration = { vnet_key = "vnet_example_uks" subnet_key = "snet_example_apim_uks" - # lz_key = "" + # lz_key = "" } identity = { type = "UserAssigned" managed_identity_keys = ["msi_apim_uks"] } - + portal = { host_name = "example.apim.com" } diff --git a/examples/compute/kubernetes_services/108-single-cluster-remote-adgroup-admin/aks.tfvars b/examples/compute/kubernetes_services/108-single-cluster-remote-adgroup-admin/aks.tfvars index f147de340c..da406c0403 100644 --- a/examples/compute/kubernetes_services/108-single-cluster-remote-adgroup-admin/aks.tfvars +++ b/examples/compute/kubernetes_services/108-single-cluster-remote-adgroup-admin/aks.tfvars @@ -39,7 +39,7 @@ aks_clusters = { addon_profile = { oms_agent = { - log_analytics_key = "central_logs_region1" + log_analytics_key = "central_logs_region1" msi_auth_for_monitoring_enabled = true } } diff --git a/examples/compute/virtual_machine/113-single-linux-custom-data-template/configuration.tfvars b/examples/compute/virtual_machine/113-single-linux-custom-data-template/configuration.tfvars index 35822f8398..42b9ea57b3 100644 --- a/examples/compute/virtual_machine/113-single-linux-custom-data-template/configuration.tfvars +++ b/examples/compute/virtual_machine/113-single-linux-custom-data-template/configuration.tfvars @@ -63,23 +63,23 @@ virtual_machines = { disable_password_authentication = true custom_data = { - templatefile = "compute/virtual_machine/113-single-linux-custom-data-template/custom_data.tpl" - my_value = "my_value" + templatefile = "compute/virtual_machine/113-single-linux-custom-data-template/custom_data.tpl" + my_value = "my_value" } dynamic_custom_data = { vnets = { - vnet_region1 ={} + vnet_region1 = {} } storage_accounts = { sa1 = { - file_share = "share1" - file_share_directory = "dir1" + file_share = "share1" + file_share_directory = "dir1" } } keyvault_keys = { key1 = { keyvault_key = "example_vm_rg1" - name = "disk-key" + name = "disk-key" } } } diff --git a/examples/databricks_access_connectors/100-databricks_access_connectors/configuration.tfvars b/examples/databricks_access_connectors/100-databricks_access_connectors/configuration.tfvars index b3dd3e7fb8..e7ece91817 100644 --- a/examples/databricks_access_connectors/100-databricks_access_connectors/configuration.tfvars +++ b/examples/databricks_access_connectors/100-databricks_access_connectors/configuration.tfvars @@ -13,16 +13,16 @@ resource_groups = { databricks_access_connectors = { dac_1 = { - name = "example-name" - resource_group_key = "dac_test" - identity = { - type = "UserAssigned" #SystemAssigned - managed_identity_keys = ["dac_test"] + name = "example-name" + resource_group_key = "dac_test" + identity = { + type = "UserAssigned" #SystemAssigned + managed_identity_keys = ["dac_test"] + } + tags = { + test = "test" + test1 = "test1" } - tags = { - test = "test" - test1 = "test1" - } } } diff --git a/examples/digital_twins/101-adt-servicebus/configuration.tfvars b/examples/digital_twins/101-adt-servicebus/configuration.tfvars index 94209583e9..de91dda74c 100644 --- a/examples/digital_twins/101-adt-servicebus/configuration.tfvars +++ b/examples/digital_twins/101-adt-servicebus/configuration.tfvars @@ -1,7 +1,7 @@ global_settings = { default_region = "region1" regions = { - region1 = "southeastasia" + region1 = "australiaeast" } } @@ -106,6 +106,7 @@ servicebus_namespaces = { sku = "Premium" # Basic | standard | Premium capacity = 1 # capacity only for Premium: 1,2,4,8,16 otherwise 0 # zone_redundant = false # only true for Premium + 
premium_messaging_partitions = 1 # tags = {} # optional namespace_auth_rules = { rule1 = { diff --git a/examples/iot/100-iot-hub/configuration.tfvars b/examples/iot/100-iot-hub/configuration.tfvars index 28769ed613..835b5ddaf8 100644 --- a/examples/iot/100-iot-hub/configuration.tfvars +++ b/examples/iot/100-iot-hub/configuration.tfvars @@ -43,7 +43,7 @@ iot_hub_certificate = { key = "iothub1" } is_verified = true - certificate_content = "iot/100-iot-hub/cert.pem" + certificate_content = "examples/iot/100-iot-hub/cert.pem" } } diff --git a/examples/iot/103-iot-hub-with-dps/configuration.tfvars b/examples/iot/103-iot-hub-with-dps/configuration.tfvars index 11b85e838c..5a89365040 100644 --- a/examples/iot/103-iot-hub-with-dps/configuration.tfvars +++ b/examples/iot/103-iot-hub-with-dps/configuration.tfvars @@ -67,7 +67,7 @@ iot_dps_certificate = { key = "dps1" } resource_group_key = "ioth_region1" - certificate_content = "iot/103-iot-hub-with-dps/cert.pem" + certificate_content = "examples/iot/103-iot-hub-with-dps/cert.pem" } } diff --git a/examples/main.tf b/examples/main.tf index 64a74b9c1d..1834c20c66 100644 --- a/examples/main.tf +++ b/examples/main.tf @@ -57,6 +57,9 @@ provider "azurerm" { features {} } + +provider "azuread" {} + data "azurerm_client_config" "default" {} locals { diff --git a/examples/maintenance_configuration/200-maintenance-configuration-assignment-vm-windows/configuration.tfvars b/examples/maintenance_configuration/200-maintenance-configuration-assignment-vm-windows/configuration.tfvars index dae867700e..dc7b8369b2 100644 --- a/examples/maintenance_configuration/200-maintenance-configuration-assignment-vm-windows/configuration.tfvars +++ b/examples/maintenance_configuration/200-maintenance-configuration-assignment-vm-windows/configuration.tfvars @@ -94,7 +94,7 @@ virtual_machines = { priority = "Spot" eviction_policy = "Deallocate" - patch_mode = "AutomaticByPlatform" + patch_mode = "AutomaticByPlatform" bypass_platform_safety_checks_on_user_schedule_enabled = true # When loading a file from a folder in custom_data, always use the relative path from caf_solution in the landing zones custom_data = "../../examples/compute/virtual_machine/101-single-windows-vm/scripts/custom.ps1" diff --git a/examples/maintenance_configuration/201-maintenance-configuration-assignment-vm-linux/configuration.tfvars b/examples/maintenance_configuration/201-maintenance-configuration-assignment-vm-linux/configuration.tfvars index fa7f0951d4..2da8bdcddc 100644 --- a/examples/maintenance_configuration/201-maintenance-configuration-assignment-vm-linux/configuration.tfvars +++ b/examples/maintenance_configuration/201-maintenance-configuration-assignment-vm-linux/configuration.tfvars @@ -6,7 +6,7 @@ global_settings = { } inherit_tags = true - + resource_defaults = { virtual_machines = { # set the below to enable Azure-managed boot diagnostics for VMs @@ -70,17 +70,17 @@ virtual_machines = { # Spot VM to save money priority = "Spot" eviction_policy = "Deallocate" - - patch_mode = "AutomaticByPlatform" + + patch_mode = "AutomaticByPlatform" bypass_platform_safety_checks_on_user_schedule_enabled = true # Keys of the NICs to attach to the VM. 
The first one in the list is the default nic network_interface_keys = ["nic0"] os_disk = { - name = "example_vm1-os" - caching = "ReadWrite" - storage_account_type = "Standard_LRS" + name = "example_vm1-os" + caching = "ReadWrite" + storage_account_type = "Standard_LRS" } identity = { type = "SystemAssigned" #SystemAssigned OR UserAssigned OR SystemAssigned, UserAssigned @@ -105,10 +105,10 @@ virtual_machines = { name = "server1-data1" storage_account_type = "Standard_LRS" # Only Empty is supported. More community contributions required to cover other scenarios - create_option = "Empty" - disk_size_gb = "10" - lun = 1 - zones = ["1"] + create_option = "Empty" + disk_size_gb = "10" + lun = 1 + zones = ["1"] } } } diff --git a/examples/servicebus/200-servicebus-privatelink/configuration.tfvars b/examples/messaging/servicebus/200-servicebus-privatelink/configuration.tfvars similarity index 100% rename from examples/servicebus/200-servicebus-privatelink/configuration.tfvars rename to examples/messaging/servicebus/200-servicebus-privatelink/configuration.tfvars diff --git a/examples/mssql_mi/200-mi/configuration.tfvars b/examples/mssql_mi/200-mi/configuration.tfvars index 962c048b99..8eea3a3122 100644 --- a/examples/mssql_mi/200-mi/configuration.tfvars +++ b/examples/mssql_mi/200-mi/configuration.tfvars @@ -21,7 +21,7 @@ vnets = { resource_group_key = "networking_region1" vnet = { name = "sqlmi-rg1" - address_space = ["172.25.88.0/21","10.2.0.0/24"] + address_space = ["172.25.88.0/21", "10.2.0.0/24"] } subnets = { sqlmi1 = { diff --git a/examples/mssql_mi/200-mi/nsg.tfvars b/examples/mssql_mi/200-mi/nsg.tfvars index d07e9224d5..d5bb2718bb 100644 --- a/examples/mssql_mi/200-mi/nsg.tfvars +++ b/examples/mssql_mi/200-mi/nsg.tfvars @@ -84,6 +84,6 @@ network_security_group_definition = { ] } subnet02 = { - nsg= [] + nsg = [] } } diff --git a/examples/networking/private_links/endpoints/100-azure-open-ai-private-endpoint/configuration.tfvars b/examples/networking/private_links/endpoints/100-azure-open-ai-private-endpoint/configuration.tfvars index 73220fa1ca..e694349351 100644 --- a/examples/networking/private_links/endpoints/100-azure-open-ai-private-endpoint/configuration.tfvars +++ b/examples/networking/private_links/endpoints/100-azure-open-ai-private-endpoint/configuration.tfvars @@ -16,9 +16,9 @@ cognitive_services_account = { resource_group = { key = "rg1" } - name = "pineconellmdemoopenai1" - kind = "OpenAI" - sku_name = "S0" + name = "pineconellmdemoopenai1" + kind = "OpenAI" + sku_name = "S0" custom_subdomain_name = "cs-alz-caf-test-b" #log_analytics_key = "la1" } @@ -34,9 +34,9 @@ vnets = { specialsubnets = {} subnets = { private_endpoints_subnet = { - name = "PrivateEndpoints" - cidr = ["10.0.16.0/24"] - enforce_private_endpoint_network_policies = true + name = "PrivateEndpoints" + cidr = ["10.0.16.0/24"] + enforce_private_endpoint_network_policies = true enforce_private_link_endpoint_network_policies = false } } @@ -58,12 +58,12 @@ private_dns = { private_endpoints = { vnet1 = { - vnet_key = "vnet1" - subnet_keys = ["private_endpoints_subnet"] + vnet_key = "vnet1" + subnet_keys = ["private_endpoints_subnet"] resource_group_key = "rg1" - cognitive_services_account = { - my_account = { - private_service_connection = { + cognitive_services_account = { + my_account = { + private_service_connection = { name = "CognitiveServicesPrivateEndpoint" } private_dns = { diff --git a/examples/networking/virtual_network_gateway/103-vpn-site-to-site-connection/configuration.tfvars 
b/examples/networking/virtual_network_gateway/103-vpn-site-to-site-connection/configuration.tfvars index a0932a6965..72638fb57a 100644 --- a/examples/networking/virtual_network_gateway/103-vpn-site-to-site-connection/configuration.tfvars +++ b/examples/networking/virtual_network_gateway/103-vpn-site-to-site-connection/configuration.tfvars @@ -78,7 +78,7 @@ virtual_network_gateway_connections = { connection1 = { name = "connection" resource_group_key = "vpngw" - type = "IPSec" + type = "IPsec" region = "region1" virtual_network_gateway_key = "gateway1" local_network_gateway_key = "local1" diff --git a/examples/recovery_vault/106-backupvault-with-sqldatabase-saphana/recovery_vaults.tfvars b/examples/recovery_vault/106-backupvault-with-sqldatabase-saphana/recovery_vaults.tfvars index b744f026c2..b740c02701 100644 --- a/examples/recovery_vault/106-backupvault-with-sqldatabase-saphana/recovery_vaults.tfvars +++ b/examples/recovery_vault/106-backupvault-with-sqldatabase-saphana/recovery_vaults.tfvars @@ -10,11 +10,11 @@ recovery_vaults = { backup_policies = { vm_workloads = { sql = { - name = "SQLTest" - workload_type = "SQLDataBase" - vault_key = "asr1" - rg_key = "primary" - timezone = "UTC" + name = "SQLTest" + workload_type = "SQLDataBase" + vault_key = "asr1" + rg_key = "primary" + timezone = "UTC" compression_enabled = false protection_policies = { sqlfull = { @@ -23,8 +23,8 @@ recovery_vaults = { frequency = "Daily" time = "15:00" } - retention_daily = { - count = 8 + retention_daily = { + count = 8 } } sqllog = { @@ -39,11 +39,11 @@ recovery_vaults = { } } saphana = { - name = "SAPHANATest" - workload_type = "SAPHanaDatabase" - vault_key = "asr1" - rg_key = "primary" - timezone = "UTC" + name = "SAPHANATest" + workload_type = "SAPHanaDatabase" + vault_key = "asr1" + rg_key = "primary" + timezone = "UTC" compression_enabled = false protection_policies = { saphanafull = { @@ -52,8 +52,8 @@ recovery_vaults = { frequency = "Daily" time = "15:00" } - retention_daily = { - count = 8 + retention_daily = { + count = 8 } } saphanalog = { @@ -65,7 +65,7 @@ recovery_vaults = { count = 8 } } - } + } } } } diff --git a/examples/role_mapping/102-azure-openai-managed-identity/configuration.tfvars b/examples/role_mapping/102-azure-openai-managed-identity/configuration.tfvars index 875f053a47..873eb25a94 100644 --- a/examples/role_mapping/102-azure-openai-managed-identity/configuration.tfvars +++ b/examples/role_mapping/102-azure-openai-managed-identity/configuration.tfvars @@ -16,16 +16,16 @@ cognitive_services_account = { resource_group = { key = "rg1" } - name = "pinecone-llm-demoopenai" - kind = "OpenAI" - sku_name = "S0" + name = "pinecone-llm-demoopenai" + kind = "OpenAI" + sku_name = "S0" custom_subdomain_name = "cs-alz-caf-llm-demoopenai" } } managed_identities = { workload-msi = { - name = "example-msi-openai-rolemap-msi" + name = "example-msi-openai-rolemap-msi" resource_group_key = "rg1" } } diff --git a/examples/search_service/100-search-service-both-apikeys-and-azuread/configuration.tfvars b/examples/search_service/100-search-service-both-apikeys-and-azuread/configuration.tfvars index 2d302c73f8..a849c3cfca 100644 --- a/examples/search_service/100-search-service-both-apikeys-and-azuread/configuration.tfvars +++ b/examples/search_service/100-search-service-both-apikeys-and-azuread/configuration.tfvars @@ -25,8 +25,8 @@ vnets = { } subnets = { default = { - name = "default" - cidr = ["10.5.1.0/24"] + name = "default" + cidr = ["10.5.1.0/24"] } } } diff --git 
a/examples/search_service/102-search-service-only-azuread/configuration.tfvars b/examples/search_service/102-search-service-only-azuread/configuration.tfvars index fa4190a82c..fd91a13b81 100644 --- a/examples/search_service/102-search-service-only-azuread/configuration.tfvars +++ b/examples/search_service/102-search-service-only-azuread/configuration.tfvars @@ -22,7 +22,7 @@ search_services = { identity = { type = "SystemAssigned" } - local_authentication_enabled = false + local_authentication_enabled = false # public_network_access_enabled = true # allowed_ips = ["13.478.57.73"] } diff --git a/examples/servicebus/100-servicebus-simple/configuration.tfvars b/examples/servicebus/100-servicebus-simple/configuration.tfvars deleted file mode 100644 index ac6557c032..0000000000 --- a/examples/servicebus/100-servicebus-simple/configuration.tfvars +++ /dev/null @@ -1,243 +0,0 @@ -global_settings = { - default_region = "region1" - regions = { - region1 = "australiaeast" - } -} - - -resource_groups = { - rg1 = { - name = "servicebus-rg" - } -} - -vnets = { - vnet1 = { - resource_group_key = "rg1" - vnet = { - name = "servicebus-vnet" - address_space = ["172.33.0.0/16"] - } - subnets = { - subnet1 = { - name = "subnet1" - cidr = ["172.33.100.0/24"] - service_endpoints = ["Microsoft.ServiceBus"] - } - } - } -} - -servicebus_namespaces = { - namespace1 = { - resource_group = { - # lz_key = "" - key = "rg1" - } - name = "jstestbusaztfmod" - sku = "Premium" # Basic | standard | Premium - capacity = 1 # capacity only for Premium: 1,2,4,8,16 otherwise 0 - # zone_redundant = false # only true for Premium - # tags = {} # optional - namespace_auth_rules = { - rule1 = { - name = "rule1" - listen = true - send = true - manage = false - } - } - - network_rule_sets = { # created in terraform but not reflected in azure? 
- ruleset1 = { - default_action = "Allow" - ip_rules = ["1.1.1.1"] - network_rules = { - subnet1 = { - # lz_key = "" - vnet_key = "vnet1" - subnet_key = "subnet1" - ignore_missing_vnet_service_endpoint = false - } - } - } - } - - } -} - -servicebus_topics = { - topic1 = { - # resource_group = { # Default to follow the namespace resource group when not specified - # # lz_key = "" - # key = "rg1" - # } - servicebus_namespace = { - # lz_key = "" - key = "namespace1" - } - name = "topic1" - - # auto_delete_on_idle = "P0Y0M0DT0H5M0S" - # default_message_ttl = "P0Y0M0DT0H5M0S" - # duplicate_detection_history_time_window = "P0Y0M0DT0H5M0S" - enable_batched_operations = false - enable_express = false - enable_partitioning = false - max_size_in_megabytes = 1024 - requires_duplicate_detection = false - support_ordering = false - - topic_auth_rules = { - rule1 = { - name = "authrule1" - listen = true - send = false - manage = false # requires both listen and send - } - } - - subscriptions = { - sub1 = { - name = "subtest1" - max_delivery_count = 1 - - auto_delete_on_idle = "P14DT5M" - default_message_ttl = "P14D" - lock_duration = "PT30S" - dead_lettering_on_message_expiration = false - dead_lettering_on_filter_evaluation_error = true - enable_batched_operations = false - requires_session = false - status = "Active" # ReceiveDisabled, Disabled, Active (default) - - # forward_to = { - # # queue_name = "" # full name of the queue - # # topic_name = "" # full name of the topic - # queue = { # key reference only works for remote landingzone - # # name = "" - # # lz_key = "" - # # key = "" - # } - # # topic = { - # # # name = "" - # # lz_key = "" - # # key = "" - # # } - # } - - # forward_dead_lettered_messages_to = { - # # queue_name = "" # full name of the queue - # # topic_name = "" # full name of the topic - # queue = { # key reference only works for remote landingzone - # # name = "" - # # lz_key = "" - # # key = "" - # } - # # topic = { - # # # name = "" - # # lz_key = "" - # # key = "" - # # } - # } - - subscription_rules = { - - correlation_filter_rules = { - rule1 = { - name = "testrule1" - # action = "" # in sql syntax against BrokeredMessage - correlation_filter = { - correlation_id = "high" - label = "red" - properties = { - custom1 = "value" - } - } - } - } - - sql_filter_rules = { - rule1 = { - name = "testrule2" - # action = "" - filter_type = "SqlFilter" - sql_filter = "x=1" - } - } - - } - - } - } - } -} - -servicebus_queues = { - queue1 = { - name = "testqueue1" - servicebus_namespace = { - # lz_key = "" - key = "namespace1" - } - # resource_group = { # default to namespace rg - # lz_key = "" - # key = "" - # } - - # lock_duration = "PT30S" - # max_size_in_megabytes = 1024 - # requires_duplicate_detection = false - # requires_session = false - # default_message_ttl = "P14D" - # dead_lettering_on_message_expiration = false - # duplicate_detection_history_time_window = "PT10M" - # max_delivery_count = 1 - # status = "Active" # Active, Creating, Deleting, Disabled, ReceiveDisabled, Renaming, SendDisabled, Unknown - # enable_batched_operations = true - # auto_delete_on_idle = "PT5M" - # enable_partitioning = false - # enable_express = false - - # forward_to = { - # # queue_name = "" # full name of the queue - # # topic_name = "" # full name of the topic - # queue = { # key reference only works for remote landingzone - # # name = "" - # # lz_key = "" - # # key = "" - # } - # # topic = { - # # # name = "" - # # lz_key = "" - # # key = "" - # # } - # } - - # forward_dead_lettered_messages_to = 
{ - # # queue_name = "" # full name of the queue - # # topic_name = "" # full name of the topic - # queue = { # key reference only works for remote landingzone - # # name = "" - # # lz_key = "" - # # key = "" - # } - # # topic = { - # # # name = "" - # # lz_key = "" - # # key = "" - # # } - # } - - queue_auth_rules = { - rule1 = { - name = "qauthrule1" - listen = true - send = false - manage = false - - } - } - } -} diff --git a/examples/synapse_analytics/101-synapse-sparkpool/configuration.tfvars b/examples/synapse_analytics/101-synapse-sparkpool/configuration.tfvars index 9ecadd8905..93f3b9a033 100644 --- a/examples/synapse_analytics/101-synapse-sparkpool/configuration.tfvars +++ b/examples/synapse_analytics/101-synapse-sparkpool/configuration.tfvars @@ -54,6 +54,7 @@ synapse_workspaces = { synapse_workspace_key = "synapse_wrkspc_re1" node_size_family = "MemoryOptimized" node_size = "Small" + spark_version = "3.4" auto_scale = { max_node_count = 50 min_node_count = 3 diff --git a/examples/tests/mock/e2e_plan.tftest.hcl b/examples/tests/mock/e2e_plan.tftest.hcl index a9d1cb2780..213502c713 100644 --- a/examples/tests/mock/e2e_plan.tftest.hcl +++ b/examples/tests/mock/e2e_plan.tftest.hcl @@ -3,7 +3,7 @@ mock_provider "azurerm" { } mock_provider "azurerm" { - alias = "vhub" + alias = "vhub" source = "./tests/mock_data" } diff --git a/examples/tests/mock_data/data.tfmock.hcl b/examples/tests/mock_data/data.tfmock.hcl index b29c7c3f4d..09746fff75 100644 --- a/examples/tests/mock_data/data.tfmock.hcl +++ b/examples/tests/mock_data/data.tfmock.hcl @@ -14,4 +14,22 @@ mock_data "azuread_client_config" { subscription_id = "00000000-0000-0000-0000-000000000000" tenant_id = "00000000-0000-0000-0000-000000000000" } +} + +mock_data "azurerm_subscription" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000001" + subscription_id = "00000000-0000-0000-0000-000000000001" + display_name = "mock_subscription" + tenant_id = "00000000-0000-0000-0000-000000000000" + } +} + +mock_data "azuread_service_principal" { + defaults = { + client_id = "00000000-0000-0000-0000-000000000000" + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "mock_service_principal" + object_id = "00000000-0000-0000-0000-000000000000" + } } \ No newline at end of file diff --git a/examples/variables.tf b/examples/variables.tf index 0e46b8498f..53625d0dd2 100644 --- a/examples/variables.tf +++ b/examples/variables.tf @@ -1130,7 +1130,7 @@ variable "maintenance_assignment_virtual_machine" { default = {} } variable "search_services" { - default = {} + default = {} } variable "load_test" { default = {} diff --git a/examples/webapps/appservice/103-appservice-extend/configuration.tfvars b/examples/webapps/appservice/103-appservice-extend/configuration.tfvars index f4cd1fcc04..b01dcbc5a3 100644 --- a/examples/webapps/appservice/103-appservice-extend/configuration.tfvars +++ b/examples/webapps/appservice/103-appservice-extend/configuration.tfvars @@ -52,19 +52,19 @@ app_services = { ip_restriction = [ { - name = "deny-all-traffic" - action = "Deny" - ip_address = "0.0.0.0/0" - priority = 65000 + name = "deny-all-traffic" + action = "Deny" + ip_address = "0.0.0.0/0" + priority = 65000 } ] scm_ip_restriction = [ { - name = "allow-all-traffic" - action = "Allow" - ip_address = "0.0.0.0/0" - priority = 65000 + name = "allow-all-traffic" + action = "Allow" + ip_address = "0.0.0.0/0" + priority = 65000 } ] } diff --git a/locals.tf b/locals.tf index 9d0d3f8179..f6038e943f 100644 --- a/locals.tf +++ 
b/locals.tf @@ -225,8 +225,8 @@ locals { inherit_tags = try(var.global_settings.inherit_tags, false) passthrough = try(var.global_settings.passthrough, false) prefix = try(var.global_settings.prefix, null) - prefix_with_hyphen = try(var.global_settings.prefix_with_hyphen, format("%s-", try(var.global_settings.prefix, try(var.global_settings.prefixes[0], random_string.prefix.0.result)))) - prefixes = try(var.global_settings.prefix, null) == "" ? null : try([var.global_settings.prefix], try(var.global_settings.prefixes, [random_string.prefix.0.result])) + prefix_with_hyphen = try(var.global_settings.prefix_with_hyphen, format("%s-", try(var.global_settings.prefix, try(var.global_settings.prefixes[0], random_string.prefix[0].result)))) + prefixes = try(var.global_settings.prefix, null) == "" ? null : try([var.global_settings.prefix], try(var.global_settings.prefixes, [random_string.prefix[0].result])) random_length = try(var.global_settings.random_length, 0) regions = try(var.global_settings.regions, null) tags = try(var.global_settings.tags, null) @@ -347,7 +347,7 @@ locals { vpn_sites = try(var.networking.vpn_sites, {}) } - object_id = coalesce(var.logged_user_objectId, var.logged_aad_app_objectId, try(data.azuread_client_config.current.object_id, null), try(data.azuread_service_principal.logged_in_app.0.object_id, null)) + object_id = coalesce(var.logged_user_objectId, var.logged_aad_app_objectId, try(data.azuread_client_config.current.object_id, null), try(data.azuread_service_principal.logged_in_app[0].object_id, null)) security = { disk_encryption_sets = try(var.security.disk_encryption_sets, {}) diff --git a/machine_learning.tf b/machine_learning.tf index 18011d51ee..589e4bfe64 100644 --- a/machine_learning.tf +++ b/machine_learning.tf @@ -7,9 +7,9 @@ module "machine_learning_workspaces" { global_settings = local.global_settings settings = each.value vnets = local.combined_objects_networking - storage_account_id = lookup(each.value, "storage_account_key") == null ? null : module.storage_accounts[each.value.storage_account_key].id - keyvault_id = lookup(each.value, "keyvault_key") == null ? null : module.keyvaults[each.value.keyvault_key].id - application_insights_id = lookup(each.value, "application_insights_key") == null ? null : module.azurerm_application_insights[each.value.application_insights_key].id + storage_account_id = can(each.value.storage_account_key) ? try(module.storage_accounts[each.value.storage_account_key].id, null) : null + keyvault_id = can(each.value.keyvault_key) ? try(module.keyvaults[each.value.keyvault_key].id, null) : null + application_insights_id = can(each.value.application_insights_key) ? try(module.azurerm_application_insights[each.value.application_insights_key].id, null) : null container_registry_id = can(each.value.container_registry_id) || can(each.value.container_registry_key) == false ? try(each.value.container_registry_id, null) : local.combined_objects_container_registry[try(each.value.lz_key, local.client_config.landingzone_key)][each.value.container_registry_key].id base_tags = try(local.global_settings.inherit_tags, false) ? 
try(local.combined_objects_resource_groups[try(each.value.resource_group.lz_key, local.client_config.landingzone_key)][try(each.value.resource_group.key, each.value.resource_group_key)].tags, {}) : {} diff --git a/main.tf b/main.tf index e411427323..605b96986c 100644 --- a/main.tf +++ b/main.tf @@ -3,7 +3,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "~> 3.75.0" + version = "~> 3.105.0" configuration_aliases = [ azurerm.vhub ] diff --git a/modules/apim/api_management_gateway_api/module.tf b/modules/apim/api_management_gateway_api/module.tf index a955868d9e..6c4dd19ff7 100644 --- a/modules/apim/api_management_gateway_api/module.tf +++ b/modules/apim/api_management_gateway_api/module.tf @@ -19,7 +19,6 @@ resource "azurerm_api_management_gateway_api" "apim" { content { create = try(timeouts.value.create, null) read = try(timeouts.value.read, null) - update = try(timeouts.value.update, null) delete = try(timeouts.value.delete, null) } } diff --git a/modules/databases/cosmos_dbs/output.tf b/modules/databases/cosmos_dbs/output.tf index 00995cc84d..379cff6d14 100644 --- a/modules/databases/cosmos_dbs/output.tf +++ b/modules/databases/cosmos_dbs/output.tf @@ -2,10 +2,6 @@ output "cosmos_account" { value = azurerm_cosmosdb_account.cosmos_account.id } -output "connection_string" { - value = azurerm_cosmosdb_account.cosmos_account.connection_strings[0] -} - output "primary_key" { value = azurerm_cosmosdb_account.cosmos_account.primary_key } diff --git a/modules/messaging/servicebus/namespace/namespace.tf b/modules/messaging/servicebus/namespace/namespace.tf index fbd10178f6..18d1a7541f 100644 --- a/modules/messaging/servicebus/namespace/namespace.tf +++ b/modules/messaging/servicebus/namespace/namespace.tf @@ -16,11 +16,12 @@ resource "azurecaf_name" "namespace" { } resource "azurerm_servicebus_namespace" "namespace" { - name = azurecaf_name.namespace.result - sku = var.settings.sku - capacity = try(var.settings.capacity, null) - zone_redundant = try(var.settings.zone_redundant, null) - tags = merge(local.tags, try(var.settings.tags, null), local.caf_tags) - location = local.location - resource_group_name = local.resource_group_name + name = azurecaf_name.namespace.result + sku = var.settings.sku + capacity = try(var.settings.capacity, null) + zone_redundant = try(var.settings.zone_redundant, null) + tags = merge(try(var.settings.tags, null), local.caf_tags) + premium_messaging_partitions = try(var.settings.premium_messaging_partitions, null) + location = local.location + resource_group_name = local.resource_group_name } diff --git a/shared_image_gallery.tf b/shared_image_gallery.tf index fe702a858f..9c8afaa8a8 100644 --- a/shared_image_gallery.tf +++ b/shared_image_gallery.tf @@ -44,7 +44,7 @@ module "packer_service_principal" { tenant_id = data.azurerm_client_config.current.tenant_id gallery_name = module.shared_image_galleries[each.value.shared_image_gallery_destination.gallery_key].name image_name = module.image_definitions[each.value.shared_image_gallery_destination.image_key].name - key_vault_id = lookup(each.value, "keyvault_key") == null ? null : module.keyvaults[each.value.keyvault_key].id + key_vault_id = lookup(each.value, "keyvault_key", null) == null ? 
null : module.keyvaults[each.value.keyvault_key].id settings = each.value depends_on = [ @@ -67,7 +67,7 @@ module "packer_build" { tenant_id = data.azurerm_client_config.current.tenant_id gallery_name = module.shared_image_galleries[each.value.shared_image_gallery_destination.gallery_key].name image_name = module.image_definitions[each.value.shared_image_gallery_destination.image_key].name - key_vault_id = lookup(each.value, "keyvault_key") == null ? null : module.keyvaults[each.value.keyvault_key].id + key_vault_id = lookup(each.value, "keyvault_key", null) == null ? null : module.keyvaults[each.value.keyvault_key].id managed_identities = local.combined_objects_managed_identities vnet_name = try(try(local.combined_objects_networking[each.value.lz_key][each.value.vnet_key].name, local.combined_objects_networking[local.client_config.landingzone_key][each.value.vnet_key].name), "") subnet_name = try(lookup(each.value, "lz_key", null) == null ? local.combined_objects_networking[local.client_config.landingzone_key][each.value.vnet_key].subnets[each.value.subnet_key].name : local.combined_objects_networking[each.value.lz_key][each.value.vnet_key].subnets[each.value.subnet_key].name, "")
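For local verification of the mock wiring added in examples/tests/mock_data/data.tfmock.hcl, a minimal standalone test file along the following lines should plan entirely offline. This is a sketch only: the file name, run label, and variable values are illustrative assumptions and are not part of this change; other providers declared by the module (e.g. azurecaf) are left unmocked here.

# examples/tests/mock/smoke_plan.tftest.hcl (hypothetical sketch, not included in this diff)
# All three providers are mocked; data sources resolve from the defaults in
# ./tests/mock_data, so `terraform test` needs no Azure credentials.
mock_provider "azurerm" {
  source = "./tests/mock_data"
}

mock_provider "azurerm" {
  alias  = "vhub"
  source = "./tests/mock_data"
}

mock_provider "azuread" {
  source = "./tests/mock_data"
}

run "plan_minimal" {
  command = plan

  # Illustrative inputs only; any of the examples' tfvars would do.
  variables {
    global_settings = {
      default_region = "region1"
      regions        = { region1 = "australiaeast" }
    }
    resource_groups = {
      rg1 = { name = "mock-rg" }
    }
  }
}

Like e2e_plan.tftest.hcl, the sketch declares both the default and the vhub azurerm mock providers, since the root module in main.tf lists azurerm.vhub in configuration_aliases.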