diff --git a/app.py b/app.py index d1c3b78..96ef1cf 100644 --- a/app.py +++ b/app.py @@ -26,21 +26,23 @@ class AppModel(BaseModel): env_dev = Environment(account=config.dev.account, region=config.dev.region) + env_name = 'dev' if config.infra_repository_name: PipelineStack(app, "{}-pipeline".format(config.project_name), + env_name, config, env=env_dev) if config.dev.ami_factory: AmiFactoryStack(app, "{}-dev-ami-factory".format(config.project_name), - 'dev', + env_name, config.project_name, config.dev.ami_factory, env=env_dev) if config.dev.software_factory: SoftwareFactoryStack(app, "{}-dev-software-factory".format(config.project_name), - 'dev', + env_name, config.project_name, config.dev.software_factory, env=env_dev) diff --git a/base_amis_mappings/NICE-DCV-windows-2019.json b/base_amis_mappings/NICE-DCV-windows-2019.json index e2fdc3d..7ea3345 100644 --- a/base_amis_mappings/NICE-DCV-windows-2019.json +++ b/base_amis_mappings/NICE-DCV-windows-2019.json @@ -1,22 +1,18 @@ { - "af-south-1": "ami-03a28b1d80470d9f1", - "ap-east-1": "ami-0e018cb29212e3c2c", - "ap-northeast-1": "ami-0185f9436c63606d6", - "ap-northeast-2": "ami-018fe3f0c542fec79", - "ap-south-1": "ami-062e11d3e0f0de270", - "ap-southeast-1": "ami-0c25850f7aa9d44d4", - "ap-southeast-2": "ami-05dd820eeb3633632", - "ca-central-1": "ami-0f39324ccb81a203f", - "eu-central-1": "ami-040f19c78276862a1", - "eu-north-1": "ami-04344deb81ca3b0f5", - "eu-south-1": "ami-0a545a4f9bd3857ce", - "eu-west-1": "ami-0b15c766ea9a1d61e", - "eu-west-2": "ami-012276bb2f5e8832e", - "eu-west-3": "ami-0e104db71d5e790ab", - "me-south-1": "ami-0f10781a19b675ecc", - "sa-east-1": "ami-0bfe6221c55cc4dad", - "us-east-1": "ami-0d0bc8a4d63535f70", - "us-east-2": "ami-07b97db455511a206", - "us-west-1": "ami-0e69120f94216db9f", - "us-west-2": "ami-0373b9a656ee59f1b" + "ap-south-1": "ami-0ffe80391a3223e58", + "eu-north-1": "ami-05b421add474d0675", + "eu-west-3": "ami-09a76fbd01a903c45", + "eu-west-2": "ami-0497ab04e4b0f8dbe", + "eu-west-1": "ami-05ed6655d770f88df", + "ap-northeast-2": "ami-0a17877eb79927851", + "ap-northeast-1": "ami-06fcd61683a513c8b", + "ca-central-1": "ami-0522e17c1096e7d0c", + "sa-east-1": "ami-0f8268479072dbc5f", + "ap-southeast-1": "ami-08759ca916a5fcc11", + "ap-southeast-2": "ami-042e99cef74daf858", + "eu-central-1": "ami-0ce3d8354129b16cf", + "us-east-1": "ami-03b3cfa0e7a05c83f", + "us-east-2": "ami-0249ff8e394e546dd", + "us-west-1": "ami-0c2cafa094aed8c19", + "us-west-2": "ami-0c79505ce1748ef67" } \ No newline at end of file diff --git a/blueprints/etas-autosar-cp/README.md b/blueprints/etas-autosar-cp/README.md index a37fb54..40c335c 100644 --- a/blueprints/etas-autosar-cp/README.md +++ b/blueprints/etas-autosar-cp/README.md @@ -41,7 +41,7 @@ and connect using with the following credentials: Username: `Administrator` -Password: `Reinvent2023@` +Password: `Etas2024@` ![Access workbench](./docs/credentials.png) @@ -51,7 +51,7 @@ You're good to go! ## Demo Walkthrough -1. To get started, once on the desktop, open ASCET_DEVELOPER and set the default workspace as `C:\demo_etas_git\ws`. +1. To get started, once on the desktop, open ASCET_DEVELOPER and set the default workspace as `C:\demo_etas\working-dir\ws`. ![Step 1](./docs/step1.png) @@ -65,27 +65,35 @@ You're good to go! 4. Save your modifications. -5. Open Windows PowerShell. +5. Push your modifications to s3. On the Desktop of the workbench, you will find a shortcut named `sync-to-s3.ps1`. Using the context menu you can execute it with PowerShell. 
+This script will compress the content of the `C:\demo_etas\working-dir` folder and copy the generated zip to the S3 bucket associated with the project. This will trigger the pipeline execution.

-6. Push your modifications to the repository.
+[![Step 5](./docs/step5.png)](./docs/step5.png)

-```sh
-cd C:/demo_etas_git/
-git commit -am "changed velocity gain to 5"
-git push
-```
+6. In the AWS Console, navigate to AWS CodePipeline, open the **project-1-dev** pipeline and you will be able to follow its execution.

-7. In the AWS Console, navigate to AWS CodePipeline, open the **project-1-dev** pipeline and you will be able to follow its execution.
+7. Back on the desktop, you can also open the **Artefacts** shortcut to have a look at the artefacts generated by the pipeline. They are indexed by date and commit ID. Under `//mil-tests` you will find an `index.html` file. Right-click it and open it with Google Chrome to see the results of the failed Model-In-the-Loop tests.

-8. Back on the desktop, you can also open the **Artefacts** shortcut to have a look at the artefacts generated by the pipeline. They are indexed by commit ID. Under `/mil-tests` you will find an `index.html` file. Right-click it and open it with Google Chrome to see the results of the failed Model-In-the-Loop tests.
-
-9. You can now start again from step **2.** by putting back the `Relative_Velocity_Gain` Default Value to **0.4**. This will result in a successful execution of the pipeline until the SiL tests, whose results can be found again directly on the desktop under the **Artefacts** shortcut.
+8. You can now start again from step **2.** by putting back the `Relative_Velocity_Gain` Default Value to **0.4**. This will result in a successful execution of the pipeline until the SiL tests, whose results can be found again directly on the desktop under the **Artefacts** shortcut.

## Demo Video

Here is the official re:Invent 2023 demo video, with commentary on the end-to-end workflow. The underlying example (Speed Controller) is simpler than the Adaptive Cruise Controller (ACC) featured in this blueprint. As you will not be using the Virtual Engineering Workbench, the relevant part starts at 1:40.

[![re:Invent Demo](docs/reinvent23demo.png)](https://www.youtube.com/watch?v=8cUedpXNTbY&ab_channel=AmazonWebServices)

+## Provided scripts
+On the desktop of the workbench, you will find two scripts: `sync-to-s3.ps1` and `restore-from-s3.ps1`.
+Using the context menu, you can execute the scripts with PowerShell.
+
+[![sync-scripts](./docs/sync-scripts.png)](./docs/sync-scripts.png)
+
+- `sync-to-s3.ps1` will compress the content of the `C:\demo_etas\working-dir` folder and copy the generated zip to the S3 bucket associated with the project. This will trigger the pipeline execution.
+- `restore-from-s3.ps1` will stop any running instance of ASCET and restore the workspace from the S3 bucket back to the original version. This can be used in case the working directory is corrupted or you want to start from a clean workspace.
+
+## Why do we use S3 for source code management?
+
+The AWS-managed Git service, **CodeCommit**, has been deprecated, which required changes to this setup.
+The pipeline has been updated to use an S3 bucket to synchronise the workspace from the workbench to the worker instances. See the architecture diagram for the updated pipeline.
+This was done to keep all project files within the AWS ecosystem and to simplify the setup.
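For reference, here is a minimal CDK sketch of the S3-based source wiring introduced by this change (the same `S3SourceAction` plus CloudTrail event-selector pattern used in `src/software_factory.py` below); the construct IDs and bucket names in the sketch are placeholders, not the blueprint's real resources:

```python
from constructs import Construct
from aws_cdk import (
    Stack,
    aws_cloudtrail as ct,
    aws_codepipeline as cp,
    aws_codepipeline_actions as cp_actions,
    aws_s3 as s3,
)

class S3SourcedPipelineSketch(Stack):
    """Illustrative only: mirrors the S3 source wiring used by the software factory stack."""

    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        key = 'working-dir.zip'

        # Versioned bucket that receives the zipped working directory from the workbench.
        source_code = s3.Bucket(self, 'SourceCodeBucket', versioned=True)

        # CloudTrail records PutObject calls on the zip so CodePipeline can react to them.
        trail = ct.Trail(self, 'CloudTrail')
        trail.add_s3_event_selector(
            [ct.S3EventSelector(bucket=source_code, object_prefix=key)],
            read_write_type=ct.ReadWriteType.WRITE_ONLY)

        pipeline = cp.Pipeline(self, 'Pipeline')
        source_artifact = cp.Artifact()

        # Event-driven S3 source: a new working-dir.zip upload starts the pipeline.
        source_stage = pipeline.add_stage(stage_name='Source')
        source_stage.add_action(cp_actions.S3SourceAction(
            action_name='S3Source',
            bucket=source_code,
            bucket_key=key,
            output=source_artifact,
            trigger=cp_actions.S3Trigger.EVENTS))
        # Build/test stages (MIL, CodeGeneration, vECUBuild, ...) would follow here,
        # driven by the stages listed in config.yml.
```

With `S3Trigger.EVENTS`, CodePipeline starts on the CloudTrail-recorded `PutObject` of `working-dir.zip` instead of polling the bucket.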
## Tools Details diff --git a/blueprints/etas-autosar-cp/config-debug.yml b/blueprints/etas-autosar-cp/config-debug.yml deleted file mode 100644 index 80e10fe..0000000 --- a/blueprints/etas-autosar-cp/config-debug.yml +++ /dev/null @@ -1,143 +0,0 @@ -project_name: project-1 - -dev: - software_factory: - repository: - name: project-1-dev - vpc: - ip_addresses: 10.1.0.0/16 - workbench: - instance_type: m6i.2xlarge - ami: - ami_ids: - eu_central_1: ami-0db7eea25fa109d3d - us_east_1: ami-095f542f273999f90 - us_west_2: ami-0ec8a2bc8f7d0164f - ap_southeast_1: ami-06c45705df85f8ad1 - volumes: - - size: 200 - device_name: /dev/sda1 - user_data: - - |- - Set-ExecutionPolicy -ExecutionPolicy Unrestricted - net user Administrator "Reinvent2023@" /active:Yes - [Environment]::SetEnvironmentVariable("Path", [Environment]::GetEnvironmentVariable('Path', "Machine") + ";c:\App_Installers\tools\rclone-v1.64.2-windows-amd64", "Machine") - aws configure set default.region $env:AWS_DEFAULT_REGION - git clone codecommit://project-1-dev c:\demo_etas_git - git config --global user.email "no-reply@etas.com" - git config --global user.name "workbench" - c:\demo_etas_git\scripts\ReInventDemo_CreateWorkspace.cmd - - $config = @" - [remote] - type = s3 - provider = AWS - env_auth = true - region = $env:AWS_DEFAULT_REGION - location_constraint = $env:AWS_DEFAULT_REGION - storage_class = STANDARD - "@ - New-Item -Path "C:\Users\Administrator\AppData\Roaming\" -Name "rclone" -ItemType "directory" -force - Set-Content "C:\Users\Administrator\AppData\Roaming\rclone\rclone.conf" $config - - $script = @" - rclone mount remote:/$env:ARTIFACT_BUCKET_NAME C:\Users\Administrator\Desktop\Artifacts --no-console --log-file C:\Users\Administrator\AppData\Roaming\rclone\log.txt --vfs-cache-mode full - "@ - Set-Content "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\rclone.bat" $script - - workers: - instance_type: m6i.2xlarge - ami: - ami_ids: - eu_central_1: ami-0db7eea25fa109d3d - us_east_1: ami-095f542f273999f90 - us_west_2: ami-0ec8a2bc8f7d0164f - ap_southeast_1: ami-06c45705df85f8ad1 - min_capacity: 1 - max_capacity: 1 - volumes: - - size: 200 - device_name: /dev/sda1 - user_data: - - |- - Set-ExecutionPolicy -ExecutionPolicy Unrestricted - net user Administrator "Reinvent2023@" /active:Yes - aws configure set default.region $env:AWS_DEFAULT_REGION - - # for debug - Remove-Item C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp -Recurse -Force - cd C:\App_Installers\tools\ETAS_Tools - git clone codecommit://aasf-etas-autosar-cp - $workerhome="C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\worker" - python -m venv "$workerhome\.venv" - & "$workerhome\.venv\Scripts\pip.exe" install -r "$workerhome\requirements.txt" - # - - - git clone codecommit://project-1-dev c:\demo_etas_git - - # for debug - git config --global user.email "no-reply@etas.com" - git config --global user.name "worker" - Get-ChildItem C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\repo | Copy-Item -Destination c:\demo_etas_git -Recurse - cd c:\demo_etas_git - git branch -m master main - git add --all - git commit -m "First import" - git push origin main - # - - - if(!(Test-Path -Path "c:\demo_etas_git\src" )) { - git config --global user.email "no-reply@etas.com" - git config --global user.name "worker" - Get-ChildItem C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\repo | Copy-Item -Destination c:\demo_etas_git -Recurse - cd c:\demo_etas_git - git branch -m master main - git add --all - git commit -m "First import" - 
git push origin main - } - - cosym --start --mode=headless --acknowledgement=yes - if(!(Test-Path -Path "C:\checkworker.ps1" )) { - $workerhome="C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\worker" - python -m venv "$workerhome\.venv" - & "$workerhome\.venv\Scripts\pip.exe" install -r "$workerhome\requirements.txt" - $script = @" - if (-not (Get-Process -Name python -ErrorAction SilentlyContinue)) { - `$workerhome="$workerhome" - `$python="`$workerhome\.venv\Scripts\python.exe" - `$worker="`$workerhome\worker.py" - Start-Process -filepath `$python -ArgumentList `$worker -Verb RunAs -WindowStyle Hidden - get-date | Add-Content c:\checkworker.log - } - "@ - Set-Content "C:\checkworker.ps1" $script - $action = New-ScheduledTaskAction -Execute "PowerShell.exe" -Argument "C:\checkworker.ps1" - $trigger = New-ScheduledTaskTrigger -Once -At (Get-Date) -RepetitionInterval (New-TimeSpan -Minutes 1) -RepetitionDuration (New-TimeSpan -Days 3650) - $principal = New-ScheduledTaskPrincipal -UserID "NT AUTHORITY\SYSTEM" -LogonType ServiceAccount -RunLevel Highest - $settings = New-ScheduledTaskSettingsSet -MultipleInstances Parallel - Register-ScheduledTask -TaskName "CheckWorker" -Action $action -Trigger $trigger -Settings $settings -Principal $principal - } - stages: - - name: MIL - actions: - - name: Testing - buildspec: mil-testing.yml - - name: EHBContainerBuild - buildspec: ehb-container-build.yml - - name: CodeGeneration - actions: - - name: ASWCodeGeneration - buildspec: asw-codegeneration.yml - - name: vECUBuild - actions: - - name: vECUBuild - buildspec: vecu-build.yml - - name: COSYMBuild - actions: - - name: COSYMBuild - buildspec: cosym-build.yml - - name: SILTesting - actions: - - name: ModelSimulator - buildspec: model-simulator.yml diff --git a/blueprints/etas-autosar-cp/config.yml b/blueprints/etas-autosar-cp/config.yml index c89ea80..a74c414 100644 --- a/blueprints/etas-autosar-cp/config.yml +++ b/blueprints/etas-autosar-cp/config.yml @@ -3,6 +3,10 @@ dev: software_factory: artifacts: retain: false + sourcecode: + retain: false + cloudtrail: + retain: false repository: name: project-1-dev vpc: @@ -11,22 +15,25 @@ dev: instance_type: m6i.large ami: ami_ids: - eu_central_1: ami-0e3b10b481c019b28 - us_east_1: ami-0e74095d545ca2cbe - ap_southeast_1: ami-075e79ceac41c3003 + eu_central_1: ami-09cf5e00bb63ed741 + us_east_1: ami-0fdaa9be8cfc23ac8 + ap_southeast_1: ami-02b9c4a08521412e7 volumes: - size: 200 device_name: /dev/sda1 user_data: - |- Set-ExecutionPolicy -ExecutionPolicy Unrestricted - net user Administrator "Reinvent2023@" /active:Yes + net user Administrator "Etas2024@" /active:Yes [Environment]::SetEnvironmentVariable("Path", [Environment]::GetEnvironmentVariable('Path', "Machine") + ";c:\App_Installers\tools\rclone-v1.64.2-windows-amd64", "Machine") aws configure set default.region $env:AWS_DEFAULT_REGION - git clone codecommit://project-1-dev c:\demo_etas_git - git config --global user.email "no-reply@etas.com" - git config --global user.name "workbench" - C:\ETAS\ASCET-DEVELOPER7.9\ascet.exe -nosplash -data C:\demo_etas_git\ws -application org.eclipse.cdt.managedbuilder.core.headlessbuild -importAll C:\demo_etas_git\src\ascet\main\ -cleanBuild all + # Setup the working directory and initialize the ascet workspace + if(!(Test-Path -Path "C:\demo_etas\working-dir" )) { + aws s3 cp s3://$env:SOURCE_BUCKET_NAME/3.0.0/ c:\demo_etas\working-dir --recursive + C:\ETAS\ASCET-DEVELOPER7.9\ascet.exe -nosplash -data C:\demo_etas\working-dir\ws -application 
org.eclipse.cdt.managedbuilder.core.headlessbuild -importAll C:\demo_etas\src\ascet\main\ -cleanBuild all + } + # Setup the rclone configuration + if(!(Test-Path -Path "C:\Users\Administrator\AppData\Roaming\rclone\rclone.conf" )) { $config = @" [remote] type = s3 @@ -36,19 +43,58 @@ dev: location_constraint = $env:AWS_DEFAULT_REGION storage_class = STANDARD "@ - New-Item -Path "C:\Users\Administrator\AppData\Roaming\" -Name "rclone" -ItemType "directory" -force - Set-Content "C:\Users\Administrator\AppData\Roaming\rclone\rclone.conf" $config + New-Item -Path "C:\Users\Administrator\AppData\Roaming\" -Name "rclone" -ItemType "directory" -force + Set-Content "C:\Users\Administrator\AppData\Roaming\rclone\rclone.conf" $config + } + # Setup the rclone mount + if(!(Test-Path -Path "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\rclone.bat" )) { $script = @" rclone mount remote:/$env:ARTIFACT_BUCKET_NAME C:\Users\Administrator\Desktop\Artifacts --no-console --log-file C:\Users\Administrator\AppData\Roaming\rclone\log.txt --vfs-cache-mode full "@ - Set-Content "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\rclone.bat" $script + Set-Content "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp\rclone.bat" $script + } + # Setup the sync to s3 script + if(!(Test-Path -Path "C:\Users\Administrator\Desktop\sync-to-s3.ps1" )) { + $syncaws = @" + `$ErrorActionPreference = "Stop"` + if(Test-Path -Path "C:\demo_etas\working-dir.zip" ) { + rm C:\demo_etas\working-dir.zip + } + & "C:\Program Files\7-zip\7z" a -tzip C:\demo_etas\working-dir.zip -r C:\demo_etas\working-dir\* -x!ws + aws s3 cp C:\demo_etas\working-dir.zip s3://$env:SOURCE_BUCKET_NAME/ + aws s3 sync C:\demo_etas\working-dir s3://$env:SOURCE_BUCKET_NAME/working-dir --delete --exclude "ws" + "@ + Set-Content "C:\Users\Administrator\Desktop\sync-to-s3.ps1" $syncaws + # On the first run, execute the sync script + & "C:\Users\Administrator\Desktop\sync-to-s3.ps1" + } + # Setup the restore from s3 script + if(!(Test-Path -Path "C:\Users\Administrator\Desktop\restore-from-s3.ps1" )) { + $restore = @" + `$ErrorActionPreference = "Stop"` + # Check if the process ascet.exe is running + $process = Get-Process -Name "ascet" -ErrorAction SilentlyContinue + + # If the process is found, kill it + if ($process) { + Stop-Process -Name "ascet" -Force + Write-Output "ascet.exe was running and has been terminated." 
+ } + if(Test-Path -Path "C:\demo_etas\working-dir" ) { + rm -r C:\demo_etas\working-dir + } + aws s3 cp s3://$env:SOURCE_BUCKET_NAME/3.0.0 C:\demo_etas\working-dir --recursive + C:\ETAS\ASCET-DEVELOPER7.9\ascet.exe -nosplash -data C:\demo_etas\working-dir\ws -application org.eclipse.cdt.managedbuilder.core.headlessbuild -importAll C:\demo_etas\working-dir\src\ascet\main\ -cleanBuild all + "@ + Set-Content "C:\Users\Administrator\Desktop\restore-from-s3.ps1" $restore + } workers: instance_type: m6i.xlarge ami: ami_ids: - eu_central_1: ami-0e3b10b481c019b28 - us_east_1: ami-0e74095d545ca2cbe - ap_southeast_1: ami-075e79ceac41c3003 + eu_central_1: ami-09cf5e00bb63ed741 + us_east_1: ami-0fdaa9be8cfc23ac8 + ap_southeast_1: ami-02b9c4a08521412e7 min_capacity: 1 max_capacity: 1 volumes: @@ -57,18 +103,12 @@ dev: user_data: - |- Set-ExecutionPolicy -ExecutionPolicy Unrestricted - net user Administrator "Reinvent2023@" /active:Yes + net user Administrator "Etas2024@" /active:Yes aws configure set default.region $env:AWS_DEFAULT_REGION - git clone codecommit://project-1-dev c:\demo_etas_git - if(!(Test-Path -Path "c:\demo_etas_git\src" )) { - git config --global user.email "no-reply@etas.com" - git config --global user.name "worker" - Get-ChildItem C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\repo | Copy-Item -Destination c:\demo_etas_git -Recurse - cd c:\demo_etas_git - git branch -m master main - git add --all - git commit -m "First import" - git push origin main + if(!(Test-Path -Path "c:\demo_etas\working-dir\src" )) { + Copy-Item -Path C:\App_Installers\tools\ETAS_Tools\aasf-etas-autosar-cp\repo -Destination c:\demo_etas\working-dir -Recurse + aws s3 cp c:\demo_etas\working-dir s3://$env:SOURCE_BUCKET_NAME/3.0.0 --recursive + aws s3 cp c:\demo_etas\working-dir s3://$env:SOURCE_BUCKET_NAME/working-dir --recursive } cosym --start --mode=headless --acknowledgement=yes if(!(Test-Path -Path "C:\checkworker.ps1" )) { @@ -88,7 +128,7 @@ dev: $action = New-ScheduledTaskAction -Execute "PowerShell.exe" -Argument "C:\checkworker.ps1" $trigger = New-ScheduledTaskTrigger -Once -At (Get-Date) -RepetitionInterval (New-TimeSpan -Minutes 1) -RepetitionDuration (New-TimeSpan -Days 3650) $settings = New-ScheduledTaskSettingsSet -MultipleInstances Parallel - Register-ScheduledTask -TaskName "CheckWorker" -Action $action -Trigger $trigger -Settings $settings -User Administrator -Password "Reinvent2023@" + Register-ScheduledTask -TaskName "CheckWorker" -Action $action -Trigger $trigger -Settings $settings -User Administrator -Password "Etas2024@" } stages: - name: MIL diff --git a/blueprints/etas-autosar-cp/docs/architecture.png b/blueprints/etas-autosar-cp/docs/architecture.png index 735b5da..fdadc82 100644 Binary files a/blueprints/etas-autosar-cp/docs/architecture.png and b/blueprints/etas-autosar-cp/docs/architecture.png differ diff --git a/blueprints/etas-autosar-cp/docs/credentials.png b/blueprints/etas-autosar-cp/docs/credentials.png index b9932e8..9ba4825 100644 Binary files a/blueprints/etas-autosar-cp/docs/credentials.png and b/blueprints/etas-autosar-cp/docs/credentials.png differ diff --git a/blueprints/etas-autosar-cp/docs/step1.png b/blueprints/etas-autosar-cp/docs/step1.png index 0a1ebb5..474dc9d 100644 Binary files a/blueprints/etas-autosar-cp/docs/step1.png and b/blueprints/etas-autosar-cp/docs/step1.png differ diff --git a/blueprints/etas-autosar-cp/docs/step5.png b/blueprints/etas-autosar-cp/docs/step5.png new file mode 100644 index 0000000..05e585e Binary files /dev/null and 
b/blueprints/etas-autosar-cp/docs/step5.png differ diff --git a/blueprints/etas-autosar-cp/docs/sync-scripts.png b/blueprints/etas-autosar-cp/docs/sync-scripts.png new file mode 100644 index 0000000..bdaf0e8 Binary files /dev/null and b/blueprints/etas-autosar-cp/docs/sync-scripts.png differ diff --git a/docs/architecture.png b/docs/architecture.png index 29cc3d2..4839f19 100644 Binary files a/docs/architecture.png and b/docs/architecture.png differ diff --git a/src/ami_factory.py b/src/ami_factory.py index f9bd6b0..327526e 100644 --- a/src/ami_factory.py +++ b/src/ami_factory.py @@ -12,39 +12,38 @@ class AmiFactoryModel(BaseModel): instance_types: List[str] components: Optional[List[ComponentModel]] = [] amis: List[AmiModel] - -class AmiFactoryStack(Stack): - def __init__(self, scope: Construct, construct_id: str, - env_name: str, - project_name: str, - config: AmiFactoryModel, - **kwargs) -> None: - super().__init__(scope, construct_id, **kwargs) - - builder = ImageBuilder(self, 'ImageBuilder', - env_name, - project_name, - config.instance_types) - try: - # read the first line from download-url.txt file - with open("./download-url.txt", "r") as f: - download_url = f.readline() - parameter = ssm.StringParameter(self, "DownloadUrl", - parameter_name="download_url", - string_value=download_url) - parameter.grant_read(builder.role) - except FileNotFoundError: - print('[WARNING] File download-url.txt not found') - pass +class AmiFactoryStack(Stack): + def __init__(self, scope: Construct, construct_id: str, + env_name: str, + project_name: str, + config: AmiFactoryModel, + **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + builder = ImageBuilder(self, 'ImageBuilder', + env_name, + project_name, + config.instance_types) - builder.role.add_managed_policy( - iam.ManagedPolicy.from_aws_managed_policy_name( - 'AWSCodeCommitFullAccess')); - - for component in config.components: - builder.add_component(component) - - for ami in config.amis: - builder.add_ami(ami) + try: + # read the first line from download-url.txt file + with open("./download-url.txt", "r") as f: + download_url = f.readline() + parameter = ssm.StringParameter(self, "DownloadUrl", + parameter_name="download_url", + string_value=download_url) + parameter.grant_read(builder.role) + except FileNotFoundError: + print('[WARNING] File download-url.txt not found') + pass + builder.role.add_managed_policy( + iam.ManagedPolicy.from_aws_managed_policy_name( + 'AWSCodeCommitFullAccess')); + + for component in config.components: + builder.add_component(component) + + for ami in config.amis: + builder.add_ami(ami) \ No newline at end of file diff --git a/src/image_builder.py b/src/image_builder.py index da84d5c..b0c9fbb 100644 --- a/src/image_builder.py +++ b/src/image_builder.py @@ -9,7 +9,6 @@ Stack, aws_iam as iam, aws_imagebuilder as imagebuilder, - aws_ssm as ssm, ) from cdk_ec2_key_pair import KeyPair from botocore.exceptions import ClientError @@ -67,9 +66,9 @@ class AmiModel(BaseModel): class ImageBuilder(Construct): def __init__(self, scope: Construct, id: str, - env_name: str, - project_name: str, - instance_types: List[str]): + env_name: str, + project_name: str, + instance_types: List[str]): super().__init__(scope, id) self._components = [] @@ -98,16 +97,16 @@ def __init__(self, scope: Construct, id: str, roles = [ self.role.role_name ]) key = KeyPair(self, "KeyPair", - name=name, + key_pair_name=name, store_public_key=True) #TBD: include specific VPC configuration, because run will fail if no default VPC 
configuration = imagebuilder.CfnInfrastructureConfiguration(self, 'Configuration', - name = f'{project_name}-{env_name}', - instance_types = instance_types, - instance_profile_name = name, - key_pair = key.key_pair_name, - terminate_instance_on_failure = False) + name = f'{project_name}-{env_name}', + instance_types = instance_types, + instance_profile_name = name, + key_pair = key.key_pair_name, + terminate_instance_on_failure = False) self.attr_arn = configuration.attr_arn configuration.add_dependency(instance_profile) @@ -186,7 +185,4 @@ def add_ami(self, ami: AmiModel): distribution_configuration_arn = imagebuilder.CfnDistributionConfiguration( self, 'DistributionConfiguration', name=ami.name, - distributions=distributions).attr_arn)) - - - + distributions=distributions).attr_arn)) \ No newline at end of file diff --git a/src/pipeline.py b/src/pipeline.py index b9dcc0c..9c7d172 100644 --- a/src/pipeline.py +++ b/src/pipeline.py @@ -1,23 +1,34 @@ import aws_cdk as cdk from constructs import Construct from aws_cdk.pipelines import CodePipeline, CodePipelineSource, ShellStep +from aws_cdk import ( + Stack, + aws_iam as iam, + aws_s3 as s3, +) from src.stage import PipelineStage, PipelineStageModel class PipelineStack(cdk.Stack): - def __init__(self, scope: Construct, construct_id: str, - config: PipelineStageModel, + def __init__(self, scope: Construct, construct_id: str, + env_name: str, + config: PipelineStageModel, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) - repo = cdk.aws_codecommit.Repository.from_repository_name(self, "MyRepo", - config.infra_repository_name) + account_id = Stack.of(self).account + region = Stack.of(self).region + + key = 'working-dir.zip' + source_code = s3.Bucket.from_bucket_name(self, + 'SourceCodeBucket', + f'{config.project_name}-{env_name}-sourcecode-{account_id}-{region}') pipeline = CodePipeline(self, "Pipeline", pipeline_name="{}-infra".format(config.project_name), cross_account_keys=True, synth=ShellStep("Synth", - input=CodePipelineSource.code_commit(repo, config.infra_repository_branch), + input=CodePipelineSource.s3(source_code, object_key=key), commands=["npm install -g aws-cdk", "python -m pip install -r requirements.txt", "cdk synth"])) diff --git a/src/software_factory.py b/src/software_factory.py index 0840fd5..cac4f81 100644 --- a/src/software_factory.py +++ b/src/software_factory.py @@ -1,26 +1,21 @@ -import os from constructs import Construct from aws_cdk import ( Stack, CfnOutput, RemovalPolicy, - aws_codecommit as cc, aws_codebuild as cb, aws_codepipeline as cp, aws_codepipeline_actions as cp_actions, aws_iam as iam, aws_s3 as s3, - aws_ec2 as ec2 + aws_ec2 as ec2, + aws_cloudtrail as ct ) from pydantic import BaseModel from typing import Optional, List from src.workers import Workers, WorkersModel from src.workbench import Workbench, WorkbenchModel -class RepositoryModel(BaseModel): - name: str - code: Optional[str] = None - class VpcModel(BaseModel): ip_addresses: str = "10.1.0.0/16" @@ -35,9 +30,16 @@ class StageModel(BaseModel): class Artifacts(BaseModel): retain: bool = True +class Sourcecode(BaseModel): + retain: bool = True + +class Cloudtrail(BaseModel): + retain: bool = True + class SoftwareFactoryModel(BaseModel): artifacts: Optional[Artifacts] = Artifacts() - repository: RepositoryModel + sourcecode: Optional[Sourcecode] = Sourcecode() + cloudtrail: Optional[Cloudtrail] = Cloudtrail() vpc: Optional[VpcModel] = VpcModel() workers: Optional[WorkersModel] = None stages: Optional[List[StageModel]] = None @@ 
-56,20 +58,26 @@ def __init__(self, scope: Construct, construct_id: str, CfnOutput(self, "Account ID", value=account_id, description='Account ID') - kwargs = { 'repository_name': config.repository.name } - if config.repository.code: - kwargs['code'] = cc.Code.from_directory(directory_path = os.path.join( - os.path.dirname(__file__), - os.path.join('..', config.repository.code))) - - self.repository = cc.Repository(self, 'Repository', **kwargs) - kwargs = { 'bucket_name': f'{project_name}-{env_name}-{account_id}-{region}' } if not config.artifacts.retain: kwargs['removal_policy'] = RemovalPolicy.DESTROY kwargs['auto_delete_objects'] = True self.artifact = s3.Bucket(self, 'ArtifactBucket', **kwargs) + + kwargs = { 'bucket_name': f'{project_name}-{env_name}-sourcecode-{account_id}-{region}', 'versioned': True} + if not config.sourcecode.retain: + kwargs['removal_policy'] = RemovalPolicy.DESTROY + kwargs['auto_delete_objects'] = True + + self.source_code = s3.Bucket(self, 'SourceCodeBucket', **kwargs) + + kwargs = { 'bucket_name': f'{project_name}-{env_name}-cloudtrail-{account_id}-{region}'} + if not config.cloudtrail.retain: + kwargs['removal_policy'] = RemovalPolicy.DESTROY + kwargs['auto_delete_objects'] = True + + self.cloudtrail = s3.Bucket(self, 'LogBucket', **kwargs) self.vpc = ec2.Vpc(self, 'VPC', ip_addresses = ec2.IpAddresses.cidr(config.vpc.ip_addresses), @@ -96,10 +104,6 @@ def __init__(self, scope: Construct, construct_id: str, service=ec2.InterfaceVpcEndpointAwsService.SSM, subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)) - self.vpc.add_interface_endpoint("CC", - service=ec2.InterfaceVpcEndpointAwsService.CODECOMMIT_GIT , - subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)) - self.vpc.add_interface_endpoint("CW", service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS , subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)) @@ -107,6 +111,7 @@ def __init__(self, scope: Construct, construct_id: str, cb_role = iam.Role(self, 'CodeBuildRole', assumed_by=iam.ServicePrincipal('codebuild.amazonaws.com')) self.artifact.grant_read_write(cb_role) + self.source_code.grant_read_write(cb_role) if config.workers: workers = Workers(self, 'Workers', @@ -120,24 +125,31 @@ def __init__(self, scope: Construct, construct_id: str, actions=['mq:ListBrokers'], resources=['*'])) workers.secret.grant_read(cb_role) - self.repository.grant_pull_push(workers.role) + self.source_code.grant_read_write(workers.role) pipeline = cp.Pipeline(self, 'Pipeline', pipeline_name=f'{project_name}-{env_name}', cross_account_keys=False, artifact_bucket=self.artifact) - - pipeline.artifact_bucket - + source_stage = pipeline.add_stage(stage_name='Source') source_artifact = cp.Artifact() + key = 'working-dir.zip' - source_stage.add_action(cp_actions.CodeCommitSourceAction( - action_name='Source', + trail = ct.Trail(self, "CloudTrail", bucket=self.cloudtrail) + trail.add_s3_event_selector([ct.S3EventSelector( + bucket=self.source_code, + object_prefix=key + )], + read_write_type=ct.ReadWriteType.WRITE_ONLY + ) + source_stage.add_action(cp_actions.S3SourceAction( + action_name='S3Source', output=source_artifact, - repository=self.repository, - branch='main')) - + bucket=self.source_code, + bucket_key=key, + trigger=cp_actions.S3Trigger.EVENTS)) + for stage in config.stages: actions = [] for action in stage.actions: @@ -148,6 +160,8 @@ def __init__(self, scope: Construct, construct_id: str, build_image=cb.LinuxBuildImage.AMAZON_LINUX_2_5), 
'build_spec': cb.BuildSpec.from_source_filename(f'.cb/{action.buildspec}'), 'environment_variables': { + 'SOURCE_CODE_BUCKET_NAME': cb.BuildEnvironmentVariable( + value=f'{self.source_code.bucket_name}'), 'ARTIFACT_BUCKET_NAME': cb.BuildEnvironmentVariable( value=f'{self.artifact.bucket_name}'), 'WORKER_QUEUE_SECRET_REGION': cb.BuildEnvironmentVariable( @@ -180,10 +194,4 @@ def __init__(self, scope: Construct, construct_id: str, vpc=self.vpc, artifact=self.artifact) wb.node.add_dependency(workers) - self.repository.grant_pull_push(wb.role) - - - - - - + self.source_code.grant_read_write(wb.role) \ No newline at end of file diff --git a/src/workbench.py b/src/workbench.py index 8d21e2b..3a246ad 100644 --- a/src/workbench.py +++ b/src/workbench.py @@ -7,7 +7,6 @@ aws_iam as iam, aws_ec2 as ec2, aws_s3 as s3) -import json from botocore.exceptions import ClientError import boto3 @@ -102,14 +101,17 @@ def __init__(self, scope: Construct, id: str, instance_name=f'{project_name}-{env_name}-workbench', instance_type=ec2.InstanceType(config.instance_type), machine_image=machine_image, + block_devices=block_devices, role=self.role, security_group=self.sg, vpc=vpc, vpc_subnets=ec2.SubnetSelection( subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), - user_data=ec2.UserData.for_windows(persist=False)) + user_data=ec2.UserData.for_windows(persist=True)) + account_id = Stack.of(self).account region = Stack.of(self).region + source_bucket_name = f'{project_name}-{env_name}-sourcecode-{account_id}-{region}' self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('AWS_DEFAULT_REGION', '{region}', 'Machine')") self.instance.user_data.add_commands( @@ -118,6 +120,10 @@ def __init__(self, scope: Construct, id: str, f"[Environment]::SetEnvironmentVariable('ARTIFACT_BUCKET_NAME', '{artifact.bucket_name}', 'Machine')") self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('ARTIFACT_BUCKET_NAME', '{artifact.bucket_name}')") + self.instance.user_data.add_commands( + f"[Environment]::SetEnvironmentVariable('SOURCE_BUCKET_NAME', '{source_bucket_name}', 'Machine')") + self.instance.user_data.add_commands( + f"[Environment]::SetEnvironmentVariable('SOURCE_BUCKET_NAME', '{source_bucket_name}')") for cmd in config.user_data: self.instance.user_data.add_commands(cmd) diff --git a/src/workers.py b/src/workers.py index 4dcbd60..e1f788f 100644 --- a/src/workers.py +++ b/src/workers.py @@ -7,14 +7,10 @@ RemovalPolicy, aws_iam as iam, aws_ec2 as ec2, - aws_autoscaling as asc, - aws_ssm as ssm, aws_amazonmq as amq, aws_secretsmanager as sm, aws_s3 as s3, - aws_logs as logs, - aws_codecommit as cc, -) + aws_logs as logs) import json from botocore.exceptions import ClientError import boto3 @@ -52,19 +48,16 @@ class VolumeModel(BaseModel): class WorkersModel(BaseModel): instance_type: str ami: AmiModel - launch_template_parameter: Optional[str] = None - max_capacity: int = Field(default=1) - min_capacity: int = Field(default=1) user_data: Optional[List[str]] volumes: List[VolumeModel] class Workers(Construct): def __init__(self, scope: Construct, id: str, - env_name: str, - project_name: str, - config: WorkersModel, - vpc: ec2.Vpc, - artifact: s3.Bucket): + env_name: str, + project_name: str, + config: WorkersModel, + vpc: ec2.Vpc, + artifact: s3.Bucket): super().__init__(scope, id) # Just allocate an IP for workers to access internet through NAT @@ -130,30 +123,18 @@ def __init__(self, scope: Construct, id: str, volume=ec2.BlockDeviceVolume.ebs(volume.size))) machine_image = 
ec2.MachineImage.generic_windows({region: ami_id}) - - self.launch_template = ec2.LaunchTemplate(self, 'LaunchTemplate', - launch_template_name=f'{project_name}-{env_name}-workbench', - associate_public_ip_address=False, - block_devices=block_devices, - http_tokens=ec2.LaunchTemplateHttpTokens.REQUIRED, + + self.instance = ec2.Instance(self, 'Instance', + instance_name=f'{project_name}-{env_name}-worker', instance_type=ec2.InstanceType(config.instance_type), machine_image=machine_image, - require_imdsv2=True, + block_devices=block_devices, role=self.role, security_group=self.sg, - user_data=ec2.UserData.for_windows(persist=True)) - - if (config.launch_template_parameter): - ssm.StringParameter(self, "LaunchTemplateID", - parameter_name=config.launch_template_parameter, - string_value=self.launch_template.launch_template_id) - - self.asc = asc.AutoScalingGroup(self,"ASG", vpc=vpc, - vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), - min_capacity=config.min_capacity, - max_capacity=config.max_capacity, - launch_template=self.launch_template) + vpc_subnets=ec2.SubnetSelection( + subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), + user_data=ec2.UserData.for_windows(persist=True)) secret_name = f'/{project_name}-{env_name}/broker_credentials' self.secret = sm.Secret(self, "Secret", @@ -182,6 +163,7 @@ def __init__(self, scope: Construct, id: str, users=[amq.CfnBroker.UserProperty( username=self.secret.secret_value_from_json("username").unsafe_unwrap(), password=self.secret.secret_value_from_json("password").unsafe_unwrap())]) + self.broker.apply_removal_policy(RemovalPolicy.DESTROY) # Access to RabbitMQ and its management UI self.sg.add_ingress_rule( @@ -191,26 +173,32 @@ def __init__(self, scope: Construct, id: str, peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(443)) + account_id = Stack.of(self).account region = Stack.of(self).region - self.launch_template.user_data.add_commands( + source_bucket_name = f'{project_name}-{env_name}-sourcecode-{account_id}-{region}' + self.instance.user_data.add_commands( f"[System.Environment]::SetEnvironmentVariable('WORKER_QUEUE_BROKER_NAME', '{broker_name}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[System.Environment]::SetEnvironmentVariable('WORKER_QUEUE_SECRET_NAME', '{self.secret.secret_name}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[System.Environment]::SetEnvironmentVariable('WORKER_QUEUE_SECRET_REGION', '{region}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[System.Environment]::SetEnvironmentVariable('WORKER_LOG_GROUP_NAME', '{log_group_name}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('AWS_DEFAULT_REGION', '{region}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('AWS_DEFAULT_REGION', '{region}')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('ARTIFACT_BUCKET_NAME', '{artifact.bucket_name}', 'Machine')") - self.launch_template.user_data.add_commands( + self.instance.user_data.add_commands( f"[Environment]::SetEnvironmentVariable('ARTIFACT_BUCKET_NAME', '{artifact.bucket_name}')") + self.instance.user_data.add_commands( + 
f"[Environment]::SetEnvironmentVariable('SOURCE_BUCKET_NAME', '{source_bucket_name}', 'Machine')") + self.instance.user_data.add_commands( + f"[Environment]::SetEnvironmentVariable('SOURCE_BUCKET_NAME', '{source_bucket_name}')") for cmd in config.user_data: - self.launch_template.user_data.add_commands(cmd) + self.instance.user_data.add_commands(cmd) # Workers access Internet with this NAT gateway nat_gateway = ec2.CfnNatGateway(self, 'NATGateway', @@ -221,5 +209,4 @@ def __init__(self, scope: Construct, id: str, ec2.CfnRoute(self, id = 'NatRoute' + str(id), route_table_id=subnet.route_table.route_table_id, destination_cidr_block='0.0.0.0/0', - nat_gateway_id=nat_gateway.ref) - + nat_gateway_id=nat_gateway.ref) \ No newline at end of file