90 changes: 90 additions & 0 deletions scripts/list-orphaned-assets.sh
@@ -0,0 +1,90 @@
#!/bin/bash
set -e # exit if a command fails
set -u # error/exit if variables are unset
set -f # disable globbing
set -o pipefail # if a command in a pipeline fails, the whole pipeline fails
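
# usage: ./scripts/list-orphaned-assets.sh <stack-name>
# lists S3 objects, history table entries, and EventBridge rules that no longer map to a test in the scenarios table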

SCENARIO_TABLE_LOGICAL_ID="DLTTestRunnerStorageDLTScenariosTableAB6F5C2A"
SCENARIO_BUCKET_LOGICAL_ID="DLTTestRunnerStorageDLTScenariosBucketA9290D21"
HISTORY_TABLE_LOGICAL_ID="DLTTestRunnerStorageDLTHistoryTable46D850CC"
SERVICES_FUNCTION_LOGICAL_ID="DLTApiDLTAPIServicesLambda9D76BA5C"

stack_name=$1
# todo: output instead of resource?
scenario_table=$(aws cloudformation describe-stack-resource \
--stack-name "$stack_name" --logical-resource-id "$SCENARIO_TABLE_LOGICAL_ID" \
--query "StackResourceDetail.PhysicalResourceId" --output text)
scenario_bucket=$(aws cloudformation describe-stack-resource \
--stack-name "$stack_name" --logical-resource-id "$SCENARIO_BUCKET_LOGICAL_ID" \
--query "StackResourceDetail.PhysicalResourceId" --output text)
history_table=$(aws cloudformation describe-stack-resource \
--stack-name "$stack_name" --logical-resource-id "$HISTORY_TABLE_LOGICAL_ID" \
--query "StackResourceDetail.PhysicalResourceId" --output text)
services_function_name=$(aws cloudformation describe-stack-resource \
--stack-name "$stack_name" --logical-resource-id "${SERVICES_FUNCTION_LOGICAL_ID}" \
--query "StackResourceDetail.PhysicalResourceId" --output text)
services_function_arn=$(aws lambda get-function --function-name "$services_function_name" \
--query "Configuration.FunctionArn" --output text)

# the CLI should paginate this call automatically, but this hasn't been tested
test_ids=$(aws dynamodb scan \
--table-name "$scenario_table" --projection-expression testId \
--query "Items[].testId.S" --output text)

# we assume the format is always public/test-scenarios/${testType}/${id}.${ext}
script_assets=$(aws s3api list-objects-v2 \
--bucket "$scenario_bucket" --prefix "public/test-scenarios/" \
--query "Contents[].Key" --output text)
for asset in $script_assets; do
test_id=$(echo "$asset" | cut -d'/' -f4 | cut -d'.' -f1)
if [[ ! "$test_ids" =~ $test_id ]]; then
echo "s3://$scenario_bucket/$asset"
fi
done

# we assume the format is always test-scenarios/${id}-${region}.json
json_assets=$(aws s3api list-objects-v2 \
--bucket "$scenario_bucket" --prefix "test-scenarios/" \
--query "Contents[].Key" --output text)
for asset in $json_assets; do
test_id=$(echo "$asset" | cut -d'/' -f2 | cut -d'-' -f1)
if [[ ! "$test_ids" =~ $test_id ]]; then
echo "s3://$scenario_bucket/$asset"
fi
done

# we assume the format is always results/${id}/*
result_assets=$(aws s3api list-objects-v2 \
--bucket "$scenario_bucket" --prefix "results/" \
--query "Contents[].Key" --output text)
for asset in $result_assets; do
test_id=$(echo "$asset" | cut -d'/' -f2)
if [[ ! "$test_ids" =~ $test_id ]]; then
echo "s3://$scenario_bucket/$asset"
fi
done

# The history table should not contain orphaned entries, but we check anyway
# the next history scan returns two values per item (tab separated), so we only split on newlines
SAVEIFS=$IFS; IFS=$'\n'
history_assets=$(aws dynamodb scan \
--table-name "$history_table" --projection-expression testId,testRunId \
--query "Items[].[testId.S,testRunId.S]" --output text)
for asset in $history_assets; do
test_id=$(echo "$asset" | cut -f1)
test_run_id=$(echo "$asset" | cut -f2)
if [[ ! "$test_ids" =~ $test_id ]]; then
echo "dynamodb://$history_table/$test_id/$test_run_id"
fi
done
IFS=$SAVEIFS

# There should be no EventBridge rules left over, but we check anyway
rules=$(aws events list-rule-names-by-target --target-arn "$services_function_arn" \
--query "RuleNames" --output text)
for rule in $rules; do
# `${rule//Scheduled/}` removes the word Scheduled from the rule variable
if [[ ! "$test_ids" =~ ${rule//Scheduled/} ]]; then
echo "events://rules/${rule}"
fi
done
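
A minimal usage sketch (the stack name below is illustrative; the script only reads via the AWS CLI and prints candidates without deleting anything):

./scripts/list-orphaned-assets.sh DistributedLoadTesting

# example output lines (placeholders, not real identifiers):
# s3://<scenario-bucket>/results/<testId>/<object-key>
# dynamodb://<history-table>/<testId>/<testRunId>
# events://rules/<rule-name>
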
64 changes: 64 additions & 0 deletions source/api-services/lib/scenarios/index.js
@@ -645,6 +645,64 @@ const writeTestScenarioToS3 = async (testTaskConfigs, testScenario, testId) => {
}
};

/**
* Deletes the per-region test scenario JSON files for a test from the scenarios bucket.
* @param {object} testTaskConfigs
* @param {string} testId
*/
const deleteTestScenarioFromS3 = async (testTaskConfigs, testId) => {
try {
const s3Promises = testTaskConfigs.map((testTaskConfig) => {
const params = {
Bucket: SCENARIOS_BUCKET,
Key: `test-scenarios/${testId}-${testTaskConfig.region}.json`,
};
return s3.deleteObject(params).promise();
});
// deleteObject also returns without exception if the object does not exist
await Promise.all(s3Promises);
} catch (err) {
console.error(err);
throw err;
}
};

/**
* Deletes the uploaded test script asset (e.g. the JMX, k6, or zip file) from the scenarios bucket.
* Simple tests have no uploaded assets, so nothing is deleted for them.
* @param {string} testId
* @param {string} testType
* @param {string} fileType
* @returns {Promise<void>}
*/
const deleteTestAssetsFromS3 = async (testId, testType, fileType) => {
// asset keys are created by the frontend, so this logic must stay in sync with it
if (testType === "simple") {
// simple tests don't have assets
return;
}
let extension = "";
if (fileType === "zip") {
extension = "zip";
} else if (testType === "jmeter") {
extension = "jmx";
} else if (testType === "k6") {
extension = "js";
} else {
console.error(`Invalid testType: ${testType}`);
throw new ErrorException("InvalidParameter", "Invalid test type.");
}
try {
const params = {
Bucket: SCENARIOS_BUCKET,
Key: `public/test-scenarios/${testType}/${testId}.${extension}`,
};
await s3.deleteObject(params).promise();
} catch (err) {
console.error(err);
throw err;
}
};

/**
*
* @param {object} testTaskConfigs
@@ -1152,10 +1210,16 @@ const deleteTest = async (testId, functionName) => {
await lambda.removePermission({ FunctionName: functionName, StatementId: ruleName }).promise();
await cloudwatchevents.deleteRule({ Name: ruleName }).promise();
}
//Delete test
await deleteDDBTestEntry(testId);
//Delete history
const testRunIds = await getTestHistoryTestRunIds(testId);
const testRuns = createBatchRequestItems(testId, testRunIds);
await parseBatchRequests(testRuns);
//Delete s3 files
await deleteTestScenarioFromS3(testAndRegionalInfraConfigs.testTaskConfigs, testId);
await deleteTestAssetsFromS3(testId, testAndRegionalInfraConfigs.testType, testAndRegionalInfraConfigs.fileType);

return "success";
} catch (err) {
console.error(err);
31 changes: 31 additions & 0 deletions source/api-services/lib/scenarios/index.spec.js
@@ -16,6 +16,7 @@ const mockAWS = require("aws-sdk");

mockAWS.S3 = jest.fn(() => ({
putObject: mockS3,
deleteObject: mockS3,
}));
mockAWS.StepFunctions = jest.fn(() => ({
startExecution: mockStepFunctions,
@@ -76,6 +77,7 @@ let getData = {
name: "mytest",
status: "running",
testScenario: '{"name":"example"}',
testType: "simple",
testTaskConfigs: [
{
region: "us-east-1",
@@ -93,6 +95,7 @@ let getDataWithConfigs = {
name: "mytest",
status: "running",
testScenario: '{"name":"example"}',
testType: "simple",
testTaskConfigs: [
{
region: "us-east-1",
@@ -129,6 +132,7 @@ let getDataWithNoConfigs = {
name: "mytest",
status: "running",
testScenario: '{"name":"example"}',
testType: "simple",
},
};

@@ -138,6 +142,7 @@ let getDataWithEmptyConfigs = {
name: "mytest",
status: "running",
testScenario: '{"name":"example"}',
testType: "simple",
testTaskConfigs: [{}],
},
};
@@ -1644,11 +1649,21 @@ describe("#SCENARIOS API:: ", () => {
return Promise.resolve();
},
}));
mockS3.mockImplementationOnce(() => ({
promise() {
// deleteObject
return Promise.resolve();
},
}));

const response = await lambda.deleteTest(testId, context.functionName);
const expectedDeleteDashboardParams = [`EcsLoadTesting-${testId}-${getRegionalConf.Item.region}`];
expect(response).toEqual("success");
expect(mockCloudWatch).toHaveBeenCalledWith({ DashboardNames: expectedDeleteDashboardParams });
expect(mockS3).toHaveBeenCalledWith({
Bucket: "bucket",
Key: `test-scenarios/${testId}-${getRegionalConf.Item.region}.json`,
});
});

it('should return "SUCCESS" when "DELETETEST" has unprocessed entries from "deleteTestHistory"', async () => {
@@ -1713,11 +1728,21 @@ describe("#SCENARIOS API:: ", () => {
return Promise.resolve();
},
}));
mockS3.mockImplementationOnce(() => ({
promise() {
// deleteObject
return Promise.resolve();
},
}));

const response = await lambda.deleteTest(testId, context.functionName);
const expectedDeleteDashboardParams = [`EcsLoadTesting-${testId}-${getRegionalConf.Item.region}`];
expect(response).toEqual("success");
expect(mockCloudWatch).toHaveBeenCalledWith({ DashboardNames: expectedDeleteDashboardParams });
expect(mockS3).toHaveBeenCalledWith({
Bucket: "bucket",
Key: `test-scenarios/${testId}-${getRegionalConf.Item.region}.json`,
});
});

it('DELETE should return "SUCCESS" when no metrics are found', async () => {
@@ -1779,6 +1804,12 @@ describe("#SCENARIOS API:: ", () => {
return Promise.resolve();
},
}));
mockS3.mockImplementationOnce(() => ({
promise() {
// deleteObject
return Promise.resolve();
},
}));

const response = await lambda.deleteTest(testId, context.functionName);
expect(response).toEqual("success");
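
The deleteTest specs above use testType: "simple", so deleteTestAssetsFromS3 returns early and only the per-region scenario JSON deletion is asserted. A sketch of an extra assertion for a non-simple test (hypothetical test data; it would also need one more mockS3.mockImplementationOnce for the second deleteObject call) might look like:

// hypothetical: with getData.Item.testType set to "jmeter" and no fileType,
// deleteTest should also remove the uploaded script asset
expect(mockS3).toHaveBeenCalledWith({
  Bucket: "bucket",
  Key: `public/test-scenarios/jmeter/${testId}.jmx`,
});
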
15 changes: 12 additions & 3 deletions source/infrastructure/lib/back-end/scenarios-storage.ts
@@ -1,10 +1,10 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

import { RemovalPolicy } from "aws-cdk-lib";
import { Duration, RemovalPolicy } from "aws-cdk-lib";
import { AttributeType, BillingMode, Table, TableEncryption } from "aws-cdk-lib/aws-dynamodb";
import { AnyPrincipal, Effect, Policy, PolicyStatement } from "aws-cdk-lib/aws-iam";
import { BlockPublicAccess, Bucket, BucketEncryption, HttpMethods } from "aws-cdk-lib/aws-s3";
import { BlockPublicAccess, Bucket, BucketEncryption, HttpMethods, LifecycleRule } from "aws-cdk-lib/aws-s3";
import { Construct } from "constructs";

export interface ScenarioTestRunnerStorageConstructProps {
@@ -14,6 +14,8 @@ export interface ScenarioTestRunnerStorageConstructProps {
readonly cloudFrontDomainName: string;
// Solution Id
readonly solutionId: string;
// Lifecycle Rules
readonly lifecycleRules?: LifecycleRule[];
}

/**
@@ -32,13 +34,20 @@ export class ScenarioTestRunnerStorageConstruct extends Construct {
constructor(scope: Construct, id: string, props: ScenarioTestRunnerStorageConstructProps) {
super(scope, id);

// we always include a lifecycle rule for generic cleanup tasks
const lifecycleRules = (props.lifecycleRules || []).concat({
enabled: true,
abortIncompleteMultipartUploadAfter: Duration.days(7), // if a multipart upload is not complete, abort it after 1 week
expiredObjectDeleteMarker: true, // remove delete marker after all versions are deleted
});
this.scenariosBucket = new Bucket(this, "DLTScenariosBucket", {
removalPolicy: RemovalPolicy.RETAIN,
serverAccessLogsBucket: props.s3LogsBucket,
serverAccessLogsPrefix: "scenarios-bucket-access/",
encryption: BucketEncryption.KMS_MANAGED,
enforceSSL: true,
versioned: true,
lifecycleRules: lifecycleRules,
blockPublicAccess: BlockPublicAccess.BLOCK_ALL,
cors: [
{
@@ -68,7 +77,7 @@
statements: [
new PolicyStatement({
effect: Effect.ALLOW,
actions: ["s3:HeadObject", "s3:PutObject", "s3:GetObject", "s3:ListBucket"],
actions: ["s3:HeadObject", "s3:PutObject", "s3:GetObject", "s3:DeleteObject", "s3:ListBucket"],
resources: [this.scenariosBucket.bucketArn, `${this.scenariosBucket.bucketArn}/*`],
}),
],
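
For context, a sketch of how a parent construct might pass the new optional lifecycleRules prop; everything outside ScenarioTestRunnerStorageConstruct and the props shown in the diff (the wrapper stack, import path, logs bucket, domain name, solution id, and the results/ expiration rule) is illustrative rather than taken from this PR:

import { Duration, Stack, StackProps } from "aws-cdk-lib";
import { Bucket } from "aws-cdk-lib/aws-s3";
import { Construct } from "constructs";
import { ScenarioTestRunnerStorageConstruct } from "./back-end/scenarios-storage";

class ExampleStack extends Stack {
  constructor(scope: Construct, id: string, props?: StackProps) {
    super(scope, id, props);
    const logsBucket = new Bucket(this, "LogsBucket");
    new ScenarioTestRunnerStorageConstruct(this, "DLTTestRunnerStorage", {
      s3LogsBucket: logsBucket,
      cloudFrontDomainName: "example.cloudfront.net", // placeholder
      solutionId: "SOxxxx", // placeholder
      // extra rules are concatenated with the built-in multipart-upload / delete-marker cleanup rule
      lifecycleRules: [{ enabled: true, prefix: "results/", expiration: Duration.days(90) }],
    });
  }
}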