From 98492b84b265f045dfe039855d7cf7fcd36bb56c Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Fri, 26 Apr 2024 15:39:59 -1000
Subject: [PATCH 01/28] Running via AWS Managed Apache Flink works

---
 .gitignore                                    |   3 +-
 README.md                                     |  88 ++++++++----
 entrypoint.sh                                 |   2 +-
 managed-flink-poc-bucket.yaml                 |  34 +++++
 managed-flink-poc.yaml                        | 130 ++++++++++++++++++
 poc-get-events.sh                             |  10 ++
 poc-send-events.sh                            |  11 ++
 poc-tail-logs.sh                              |  41 ++++++
 pom.xml                                       |  90 +++++-------
 .../stateful_functions/Configuration.java     |  14 ++
 .../com/example/stateful_functions/Main.java  |  26 ++++
 .../egress/EgressSerializer.java              |   6 +-
 src/main/proto/envelope.proto                 |   2 +-
 13 files changed, 371 insertions(+), 86 deletions(-)
 create mode 100644 managed-flink-poc-bucket.yaml
 create mode 100644 managed-flink-poc.yaml
 create mode 100755 poc-get-events.sh
 create mode 100755 poc-send-events.sh
 create mode 100755 poc-tail-logs.sh
 create mode 100644 src/main/java/com/example/stateful_functions/Main.java

diff --git a/.gitignore b/.gitignore
index 0c02a85..16409d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@
 target
 dependency-reduced-pom.xml
 *~
-.*~
\ No newline at end of file
+.*~
+.cwlogs

diff --git a/README.md b/README.md
index 862f22c..17cb9d4 100644
--- a/README.md
+++ b/README.md
@@ -91,43 +91,81 @@ Users running on Apple silicon should ensure that the file ~/.m2/settings.xml ex
 ```

-To compile the code and run the tests using the included Maven wrapper script...
+
+To compile the code and run the tests using the included Maven wrapper script, first see below about
+building and installing Apache Flink Stateful Functions compatible with Flink 1.18, then do this:
 ```
 ./mvnw test
 ```

-## Running the project via Docker Compose
+## Running the project via AWS Managed Flink
+
+### Version compatibility between AWS Managed Flink and Stateful Functions
+
+The latest release of Apache Flink Stateful Functions is 3.3, but it's compiled and built
+to run with Flink 1.16.2. AWS Managed Flink supports Flink versions 1.15 and 1.18. So the first
+step towards running via AWS Managed Flink is to create a version of the stateful functions library
+compatible with Flink 1.18. The required changes are provided here:
+https://github.com/kellinwood/flink-statefun/pull/1/files.
+Clone that repo, check out the `release-3.3-1.18`
+branch, and build/install it locally via `mvn install`.

+### Build and package this project
+```shell
+mvn package
+```
+
+### Create an S3 bucket and upload this project's JAR file

-Follow the instructions below to run the project via Docker Compose. Note that Kinesis support is provided
-by a [localstack](https://www.localstack.cloud/) container.
+To create the bucket, create a CloudFormation stack named `managed-flink-code-bucket` as defined [here](./managed-flink-poc-bucket.yaml),
+and after that finishes, use the AWS CLI to upload the jar file:

-The demo works using three docker compose "profiles" (phases).
-1. In the first phase, the flink cluster running our stateful function application is started,
-   along with localstack, and an aws-cli container that creates the ingress and egress Kinesis streams.
-2. The second phase runs an aws-cli container to send events to the ingress stream. The events
-   sent are from [product-cart-integration-test-events.jsonl](./src/test/resources/product-cart-integration-test-events.jsonl)
-3. The third phase runs an aws-cli container to fetch the events from the egress stream and output them to the console.
```shell
-# Build this project and create the jar file
-./mvnw package
+export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
+aws s3 cp target/my-stateful-functions-embedded-java-3.3.0.jar \
+    s3://managed-flink-code-bucket-codebucket-${AWS_ACCOUNT_ID}/
+```

-# Build the flink docker images, and re-run these if code changes have been made
-docker compose build jobmanager
-docker compose build taskmanager
+### Create the Kinesis streams, Managed Flink application, and related AWS Resources

-# The statefun profile starts localstack, creates the kinesis streams, and starts the Flink jobmanager and taskmanager
-docker compose --profile statefun up -d
+Create a CloudFormation stack named `managed-flink-poc` as defined by the templates [here](./managed-flink-poc.yaml)

-# Optionally connect the IDE debugger to the taskmanager on localhost port 5066 at this point
+### Configure the Managed Flink application using the AWS Web Console

-# Send some events
-docker compose --profile send-events up
+Visit `Managed Apache Flink` in the AWS web console and click through to the Flink application
+created via the CF stack above. Note that the application is in the "ready" state and is not
+running yet.

-# Get and display the events from the egress stream
-# Note that some VPNs (i.e., ZScaler) can cause failures with 'yum'. The workaround is to disconnect from the VPN first.
-docker compose --profile get-egress-events up
+* Click the "Configure" button on the Flink application's detail page.
+* Scroll down to the "Logging and monitoring" section.
+* Click to turn on logging
+* Click to use a custom log stream
+* Click "Browse" to find the log stream
+* Navigate through the log group named "managed-flink-poc-log-group..." and click the "Choose" button next to the
+  "managed-flink-poc-log-stream..." entry
+* Under "Monitoring metrics level with CloudWatch" select "Operator"
+* Scroll down to the bottom and click "Save changes"
+* Once the changes have finished being saved, click the "Run" button to start the application.

-# Shut everything down
-docker compose --profile all down
+Note that in many CloudFormation examples on how to deploy a Managed Flink application, the steps above
+are performed via API calls made by in-line lambdas defined in the CF template. This is future work
+for this example/demo project.
+
+### Monitor the CloudWatch logging output
+
+```shell
+./poc-tail-logs.sh
+```
+This script will show all the log entries from the start of application launch, and will
+wait for new entries to arrive and display them too. The script will resume from where it
+left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory.
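+
+The application's state can also be checked from the CLI instead of the web console. A minimal
+sketch, assuming the application name `ExampleManagedFlinkApplication` set in managed-flink-poc.yaml:
+
+```shell
+# Print the application's current status (READY, STARTING, RUNNING, ...)
+aws kinesisanalyticsv2 describe-application \
+  --application-name ExampleManagedFlinkApplication \
+  --query 'ApplicationDetail.ApplicationStatus' \
+  --output text
+```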
+### Send sample events to the ingress stream
+```shell
+./poc-send-events.sh
+```
+
+### Get and display the events published to the egress stream
+```shell
+./poc-get-events.sh
+```

diff --git a/entrypoint.sh b/entrypoint.sh
index 9e9fab3..66c0690 100644
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -2,7 +2,7 @@

 set -e

-export ENABLE_BUILT_IN_PLUGINS=flink-s3-fs-hadoop-1.16.2.jar
+export ENABLE_BUILT_IN_PLUGINS=flink-s3-fs-hadoop-1.18.1.jar

 # fix for rocksdb memory fragmentation issue
 export LD_PRELOAD=$LD_PRELOAD:/usr/lib/x86_64-linux-gnu/libjemalloc.so

diff --git a/managed-flink-poc-bucket.yaml b/managed-flink-poc-bucket.yaml
new file mode 100644
index 0000000..6db29d5
--- /dev/null
+++ b/managed-flink-poc-bucket.yaml
@@ -0,0 +1,34 @@
+# A CloudFormation stack for the S3 bucket to which the flink stateful functions job JAR file will be uploaded.
+# Once this bucket has been created, upload the JAR file, then create the managed-flink-poc stack.
+Resources:
+  CodeBucket:
+    Type: AWS::S3::Bucket
+    Properties:
+      BucketName: !Sub ${AWS::StackName}-codebucket-${AWS::AccountId}
+      BucketEncryption:
+        ServerSideEncryptionConfiguration:
+          - ServerSideEncryptionByDefault:
+              SSEAlgorithm: aws:kms
+              KMSMasterKeyID: alias/aws/s3
+  CodeBucketBucketPolicy:
+    Type: AWS::S3::BucketPolicy
+    Properties:
+      Bucket: !Ref CodeBucket
+      PolicyDocument:
+        Id: RequireEncryptionInTransit
+        Version: '2012-10-17'
+        Statement:
+          - Principal: '*'
+            Action: '*'
+            Effect: Deny
+            Resource:
+              - !GetAtt CodeBucket.Arn
+              - !Sub ${CodeBucket.Arn}/*
+            Condition:
+              Bool:
+                aws:SecureTransport: 'false'
+Outputs:
+  ManagedFlinkCodeBucketArn:
+    Value: !GetAtt CodeBucket.Arn
+    Export:
+      Name: ManagedFlinkCodeBucketArn # Exported for use by the stack defined in managed-flink-poc.yaml

diff --git a/managed-flink-poc.yaml b/managed-flink-poc.yaml
new file mode 100644
index 0000000..50f6aba
--- /dev/null
+++ b/managed-flink-poc.yaml
@@ -0,0 +1,130 @@
+# A CloudFormation stack containing all resources except for the S3 bucket containing the statefun application JAR.
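+# The resources below, in order: the ingress and egress Kinesis streams, a CloudWatch log group
+# and log stream for the Flink application's output, the IAM service execution role, and the
+# Managed Flink (Kinesis Analytics v2) application itself.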
+Resources: + ManagedFlinkIngressStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 1 + StreamEncryption: + EncryptionType: KMS + KeyId: alias/aws/kinesis + StreamModeDetails: + StreamMode: PROVISIONED + ManagedFlinkEgressStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 1 + StreamEncryption: + EncryptionType: KMS + KeyId: alias/aws/kinesis + StreamModeDetails: + StreamMode: PROVISIONED + ManagedFlinkLogGroup: + Type: 'AWS::Logs::LogGroup' + Properties: + LogGroupName: + !Sub ${AWS::StackName}-log-group-${AWS::AccountId} + RetentionInDays: 7 + UpdateReplacePolicy: Delete + DeletionPolicy: Delete + ManagedFlinkLogStream: + Type: 'AWS::Logs::LogStream' + Properties: + LogGroupName: + Ref: ManagedFlinkLogGroup + LogStreamName: + !Sub ${AWS::StackName}-log-stream-${AWS::AccountId} + UpdateReplacePolicy: Delete + DeletionPolicy: Delete + ManagedFlinkIAMRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - kinesisanalytics.amazonaws.com + Action: sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisFullAccess + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + Path: / + Policies: + - PolicyDocument: + Statement: + - Action: + - 'kinesis:DescribeStream' + - 'kinesis:GetRecords' + - 'kinesis:GetShardIterator' + - 'kinesis:ListShards' + Effect: Allow + Resource: + - 'Fn::GetAtt': + - ManagedFlinkIngressStream + - Arn + - 'Fn::GetAtt': + - ManagedFlinkEgressStream + - Arn + Version: '2012-10-17' + PolicyName: AccessKDSPolicy + - PolicyDocument: + Statement: + - Action: + - 'logs:DescribeLogGroups' + - 'logs:DescribeLogStreams' + - 'logs:PutLogEvents' + Effect: Allow + Resource: + 'Fn::GetAtt': + - ManagedFlinkLogGroup + - Arn + Version: '2012-10-17' + PolicyName: AccessCWLogsPolicy + - PolicyDocument: + Statement: + - Action: 'cloudwatch:PutMetricData' + Effect: Allow + Resource: '*' + Version: '2012-10-17' + PolicyName: AccessCWMetricsPolicy + ManagedFlinkApplication: + Type: AWS::KinesisAnalyticsV2::Application + Properties: + ApplicationName: 'ExampleManagedFlinkApplication' + ApplicationDescription: 'Example Managed Flink Application' + RuntimeEnvironment: 'FLINK-1_18' + ServiceExecutionRole: !GetAtt ManagedFlinkIAMRole.Arn + ApplicationConfiguration: + EnvironmentProperties: + PropertyGroups: + - PropertyGroupId: 'StatefunApplicationProperties' + PropertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: !Ref ManagedFlinkIngressStream + EVENTS_EGRESS_STREAM_DEFAULT: !Ref ManagedFlinkEgressStream + AWS_REGION: !Ref AWS::Region + FlinkApplicationConfiguration: + CheckpointConfiguration: + ConfigurationType: 'CUSTOM' + CheckpointingEnabled: True + CheckpointInterval: 900000 # Every fifteen minutes + MinPauseBetweenCheckpoints: 500 + MonitoringConfiguration: + ConfigurationType: 'CUSTOM' + MetricsLevel: 'APPLICATION' + LogLevel: 'INFO' + ParallelismConfiguration: + ConfigurationType: 'CUSTOM' + Parallelism: 1 + ParallelismPerKPU: 1 + AutoScalingEnabled: True + ApplicationSnapshotConfiguration: + SnapshotsEnabled: True + ApplicationCodeConfiguration: + CodeContent: + S3ContentLocation: + BucketARN: !ImportValue ManagedFlinkCodeBucketArn # Created and exported by the stack defined in managed-flink-poc-bucket.yaml + FileKey: "my-stateful-functions-embedded-java-3.3.0.jar" + CodeContentType: 'ZIPFILE' + diff --git a/poc-get-events.sh b/poc-get-events.sh new file mode 100755 index 0000000..b1291d5 --- /dev/null +++ 
b/poc-get-events.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep ManagedFlinkEgressStream) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +for encoded_data in $(aws kinesis get-records --shard-iterator $shard_iterator | jq -crM .Records[].Data); do + echo $encoded_data | base64 -d | jq . +done diff --git a/poc-send-events.sh b/poc-send-events.sh new file mode 100755 index 0000000..6ff5744 --- /dev/null +++ b/poc-send-events.sh @@ -0,0 +1,11 @@ +#! /bin/bash + +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep ManagedFlinkIngressStream) + +grep -v test.action src/test/resources/product-cart-integration-test-events.jsonl | while read line; do + partkey=$(echo $line | md5sum | awk '{print $1}') + data=$(echo $line | base64) + cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data" + echo $cmd + eval $cmd +done diff --git a/poc-tail-logs.sh b/poc-tail-logs.sh new file mode 100755 index 0000000..2a326e5 --- /dev/null +++ b/poc-tail-logs.sh @@ -0,0 +1,41 @@ +#! /bin/bash + +set -e + +cd $(dirname $0) + +AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID:-516535517513} +NEXT_TOKEN_ARG= + +CWLOGS_DIR=.cwlogs +mkdir -p $CWLOGS_DIR + +ITERATION=1 + +if [ -f $CWLOGS_DIR/next.token ]; then + NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)" +fi + +while true; do + CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json + aws logs get-log-events \ + --start-from-head \ + $NEXT_TOKEN_ARG \ + --log-group-name managed-flink-poc-log-group-${AWS_ACCOUNT_ID} \ + --log-stream-name managed-flink-poc-log-stream-${AWS_ACCOUNT_ID} \ + >$CWLOG_FILE + + NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) + echo $NEXT_TOKEN >$CWLOGS_DIR/next.token + NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN" + EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length') + + if [[ $EVENT_COUNT == 0 ]]; then + sleep 2 + rm $CWLOG_FILE + else + cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log + fi + + ITERATION=$(echo "1 + $ITERATION" | bc) +done diff --git a/pom.xml b/pom.xml index 952d300..561ba45 100644 --- a/pom.xml +++ b/pom.xml @@ -15,13 +15,15 @@ UTF-8 - 3.3.0 - 1.16.2 + 3.3-1.18 + 1.18.1 + 3.7.1 11 ${java.version} ${java.version} true 2.20.162 + 1.2.0 @@ -49,27 +51,6 @@ apache-client - - software.amazon.awssdk - regions - - - - software.amazon.awssdk - auth - - - - software.amazon.awssdk - opensearch - - - - - software.amazon.awssdk - sts - @@ -85,12 +66,35 @@ - + org.apache.flink - statefun-sdk-embedded + statefun-flink-distribution ${statefun.version} - provided + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + + org.apache.flink + flink-connector-kafka + + + org.apache.flink + statefun-kafka-io + + + + + com.amazonaws + aws-kinesisanalytics-runtime + ${kda.runtime.version} @@ -109,15 +113,9 @@ com.google.protobuf protobuf-java - 3.16.3 + ${protobuf.version} - - - org.apache.flink - statefun-flink-datastream - ${statefun.version} - @@ -151,20 +149,6 @@ 3.11 - - - org.apache.flink - flink-state-processor-api - ${flink.version} - - - - org.apache.flink - statefun-flink-distribution - ${statefun.version} - - - org.apache.flink @@ -173,12 +157,6 
@@ test - - org.apache.flink - statefun-flink-state-processor - ${statefun.version} - - org.springframework.boot spring-boot-starter-test @@ -255,7 +233,7 @@ protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:3.15.8:exe:${os.detected.classifier} + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} @@ -339,7 +317,7 @@ - org.apache.flink.statefun.flink.core.StatefulFunctionsJob + com.example.stateful_functions.Main @@ -409,7 +387,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - com.il.otk.com.google.protobuf:protoc:3.15.8:exe:${os.detected.classifier}-alpine + com.il.otk.com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}-alpine diff --git a/src/main/java/com/example/stateful_functions/Configuration.java b/src/main/java/com/example/stateful_functions/Configuration.java index 3681993..b83cdd6 100644 --- a/src/main/java/com/example/stateful_functions/Configuration.java +++ b/src/main/java/com/example/stateful_functions/Configuration.java @@ -1,5 +1,6 @@ package com.example.stateful_functions; +import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime; import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -7,6 +8,8 @@ import java.lang.reflect.Field; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Properties; public class Configuration { @@ -57,6 +60,17 @@ private static final Properties getProperties() { properties.putAll(env); + // If deployed in AWS Managed Flink, then get our config from + // KinesisAnalyticsRuntime.getApplicationProperties().get("StatefunApplicationProperties") + try { + Optional.ofNullable(KinesisAnalyticsRuntime.getApplicationProperties()) + .map(ap -> ap.get("StatefunApplicationProperties")) + .filter(Objects::nonNull) + .ifPresent(ap -> properties.putAll(ap)); + } + catch (Exception x) { + LOG.warn(x.getMessage(), x); + } return properties; } diff --git a/src/main/java/com/example/stateful_functions/Main.java b/src/main/java/com/example/stateful_functions/Main.java new file mode 100644 index 0000000..da6c1ab --- /dev/null +++ b/src/main/java/com/example/stateful_functions/Main.java @@ -0,0 +1,26 @@ +package com.example.stateful_functions; + +import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig; +import org.apache.flink.statefun.flink.core.StatefulFunctionsJob; +import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverseProvider; +import org.apache.flink.statefun.flink.core.spi.Modules; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; + +/** Use the main() method here instead of StatefulFunctionJob.main() as described in AWS Managed Flink docs */ +public class Main { + + public static void main(String... 
args) throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + + StatefulFunctionsConfig stateFunConfig = StatefulFunctionsConfig.fromEnvironment(env); + stateFunConfig.setProvider((StatefulFunctionsUniverseProvider) (classLoader, statefulFunctionsConfig) -> { + Modules modules = Modules.loadFromClassPath(stateFunConfig); + return modules.createStatefulFunctionsUniverse(); + }); + + + StatefulFunctionsJob.main(env, stateFunConfig); + } + +} + diff --git a/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java b/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java index eaf9aec..34d425d 100644 --- a/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java +++ b/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java @@ -8,6 +8,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Optional; + import static java.nio.charset.StandardCharsets.UTF_8; @@ -19,8 +21,8 @@ public class EgressSerializer implements KinesisEgressSerializer Date: Tue, 7 May 2024 17:09:44 -1000 Subject: [PATCH 02/28] Updated CloudFormation templates. Includes custom resource for programmatic logging config and Flink auto-start --- managed-flink-poc-bucket.yaml | 1 + managed-flink-poc.yaml | 114 +++++++++++++++++++++++++++++++++- 2 files changed, 112 insertions(+), 3 deletions(-) diff --git a/managed-flink-poc-bucket.yaml b/managed-flink-poc-bucket.yaml index 6db29d5..ce52994 100644 --- a/managed-flink-poc-bucket.yaml +++ b/managed-flink-poc-bucket.yaml @@ -1,5 +1,6 @@ # A CloudFormation stack for the S3 bucket to which the flink stateful functions job JAR file will be uploaded. # Once this bucket has been created, upload the JAR file, then create the managed-flink-poc stack. +Description: "Bucket and policy to hold statefun JAR file for Managed Flink proof-of-concept. Contact: Ken Ellinwood" Resources: CodeBucket: Type: AWS::S3::Bucket diff --git a/managed-flink-poc.yaml b/managed-flink-poc.yaml index 50f6aba..46ad4c6 100644 --- a/managed-flink-poc.yaml +++ b/managed-flink-poc.yaml @@ -1,4 +1,5 @@ # A CloudFormation stack containing all resources except for the S3 bucket containing the statefun application JAR. +Description: "Stack to run Managed Flink proof-of-concept. 
Contact: Ken Ellinwood" Resources: ManagedFlinkIngressStream: Type: AWS::Kinesis::Stream @@ -92,8 +93,8 @@ Resources: ManagedFlinkApplication: Type: AWS::KinesisAnalyticsV2::Application Properties: - ApplicationName: 'ExampleManagedFlinkApplication' - ApplicationDescription: 'Example Managed Flink Application' + ApplicationName: 'ManagedFlinkPOCApplication' + ApplicationDescription: 'Managed Flink POC Application' RuntimeEnvironment: 'FLINK-1_18' ServiceExecutionRole: !GetAtt ManagedFlinkIAMRole.Arn ApplicationConfiguration: @@ -126,5 +127,112 @@ Resources: S3ContentLocation: BucketARN: !ImportValue ManagedFlinkCodeBucketArn # Created and exported by the stack defined in managed-flink-poc-bucket.yaml FileKey: "my-stateful-functions-embedded-java-3.3.0.jar" - CodeContentType: 'ZIPFILE' + CodeContentType: 'ZIPFILE' + ManagedFlinkCustomResource: + Description: Invokes ManagedFlinkCRLambda to update and start the Flink application via API calls + Type: AWS::CloudFormation::CustomResource + DependsOn: ManagedFlinkCRLambda + Version: "1.0" + Properties: + ServiceToken: !GetAtt ManagedFlinkCRLambda.Arn + Region: !Ref AWS::Region + ApplicationName: !Ref ManagedFlinkApplication + # LogStream ARN format: arn:aws:logs:REGION:ACCOUNT_NUMBER:log-group:LOG_GROUP_NAME:log-stream:LOG_STREAM_NAME + # We get most of this from the LogGroup ARN, then remove the trailing "*" and append "log-stream:LOG_STREAM_NAME" + LogStreamArn: !Join [ "", [ !Select [ 0, !Split [ "*", !GetAtt ManagedFlinkLogGroup.Arn ] ], "log-stream:", !Ref ManagedFlinkLogStream ] ] + ManagedFlinkCRLambdaRole: + Type: AWS::IAM::Role + DependsOn: + - ManagedFlinkApplication + - ManagedFlinkLogStream + Properties: + Description: A role for the custom resource lambda to use while interacting with an application. 
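+      # The broad managed policies attached below (Kinesis Analytics and CloudWatch Logs full access)
+      # let the lambda call describe_application, add_application_cloud_watch_logging_option, and
+      # start_application; a more tightly scoped policy would also work here.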
+ AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess + - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess + Path: / + ManagedFlinkCRLambda: + Type: AWS::Lambda::Function + DependsOn: ManagedFlinkCRLambdaRole + Properties: + Description: Configures logging and starts the Flink application + Runtime: python3.8 + Role: !GetAtt ManagedFlinkCRLambdaRole.Arn + Handler: index.lambda_handler + Timeout: 30 + Code: + ZipFile: | + import logging + import cfnresponse + import boto3 + + logger = logging.getLogger() + logger.setLevel(logging.INFO) + + def lambda_handler(event, context): + logger.info('Incoming CFN event {}'.format(event)) + + try: + event_type = event['RequestType'] + resource_props = event['ResourceProperties'] + application_name = resource_props['ApplicationName'] + + # Ignore events other than Create or Update, + if event_type not in ['Create', 'Update']: + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}) + return + + # kinesisanalyticsv2 API reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesisanalyticsv2.html + client_kda = boto3.client('kinesisanalyticsv2', region_name=event['ResourceProperties']['Region']) + + describe_response = client_kda.describe_application(ApplicationName=application_name) + logger.info(f'describe_application response: {describe_response}') + + if event_type == 'Create': + # Add cloudwatch logging option + log_stream_arn = resource_props['LogStreamArn'] + conditional_token = describe_response['ApplicationDetail']['ConditionalToken'] + response = client_kda.add_application_cloud_watch_logging_option( + ApplicationName = application_name, + CloudWatchLoggingOption = { + 'LogStreamARN': log_stream_arn + }, + ConditionalToken = conditional_token + ) + logger.info(f'add_application_cloud_watch_logging_option response: {response}') + + # get application status. + application_status = describe_response['ApplicationDetail']['ApplicationStatus'] + + # an application can be started from 'READY' status only. + if application_status != 'READY': + logger.info('No-op for Application {} because ApplicationStatus {} is filtered'.format(application_name, application_status)) + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}) + + return + + # create RunConfiguration. + run_configuration = { + 'ApplicationRestoreConfiguration': { + 'ApplicationRestoreType': 'RESTORE_FROM_LATEST_SNAPSHOT', + } + } + + logger.info('RunConfiguration for Application {}: {}'.format(application_name, run_configuration)) + # this call doesn't wait for an application to transfer to 'RUNNING' state. 
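+ # start_application only submits the start request; the service then moves the
+ # application through STARTING to RUNNING on its own. Waiting here for RUNNING
+ # could exceed this function's 30-second timeout, so SUCCESS is reported to
+ # CloudFormation as soon as the request is accepted.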
+ client_kda.start_application(ApplicationName=application_name, RunConfiguration=run_configuration)
+ logger.info('Started Application: {}'.format(application_name))
+ cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
+ except Exception as err:
+ logger.error(err)
+ cfnresponse.send(event, context, cfnresponse.FAILED, {"Data": str(err)})

From cd74e6d725a7fc1ca527807f7858aa595cfbb6d5 Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Tue, 7 May 2024 19:35:43 -1000
Subject: [PATCH 03/28] Remove unnecessary manual steps that are now handled in the custom resource templates

---
 README.md | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/README.md b/README.md
index 17cb9d4..f4cacee 100644
--- a/README.md
+++ b/README.md
@@ -123,33 +123,15 @@ and after that finishes, use the AWS CLI to upload the jar file:
 ```shell
 export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
 aws s3 cp target/my-stateful-functions-embedded-java-3.3.0.jar \
-    s3://managed-flink-code-bucket-codebucket-${AWS_ACCOUNT_ID}/
+    s3://managed-flink-poc-bucket-codebucket-${AWS_ACCOUNT_ID}/
 ```

 ### Create the Kinesis streams, Managed Flink application, and related AWS Resources

-Create a CloudFormation stack named `managed-flink-poc` as defined by the templates [here](./managed-flink-poc.yaml)
+Create a CloudFormation stack named `managed-flink-poc` as defined by the CloudFormation templates [here](./managed-flink-poc.yaml).
+This stack includes a custom resource lambda that programmatically configures logging when the Flink application is created,
+and transitions the application from the Ready to Running state.

-### Configure the Managed Flink application using the AWS Web Console
-
-Visit `Managed Apache Flink` in the AWS web console and click through to the Flink application
-created via the CF stack above. Note that the application is in the "ready" state and is not
-running yet.
-
-* Click the "Configure" button on the Flink application's detail page.
-* Scroll down to the "Logging and monitoring" section.
-* Click to turn on logging
-* Click to use a custom log stream
-* Click "Browse" to find the log stream
-* Navigate through the log group named "managed-flink-poc-log-group..." and click the "Choose" button next to the
-  "managed-flink-poc-log-stream..." entry
-* Under "Monitoring metrics level with CloudWatch" select "Operator"
-* Scroll down to the bottom and click "Save changes"
-* Once the changes have finished being saved, click the "Run" button to start the application.
-
-Note that in many CloudFormation examples on how to deploy a Managed Flink application, the steps above
-are performed via API calls made by in-line lambdas defined in the CF template. This is future work
-for this example/demo project.
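+One way to confirm the custom resource did its work is to inspect the stack events from the
+CLI. A sketch, assuming the stack name `managed-flink-poc` used above:
+
+```shell
+# The ManagedFlinkCustomResource should reach CREATE_COMPLETE once logging is
+# configured and the application start request has been accepted
+aws cloudformation describe-stack-events \
+  --stack-name managed-flink-poc \
+  --query 'StackEvents[?LogicalResourceId==`ManagedFlinkCustomResource`].[ResourceStatus,Timestamp]' \
+  --output table
+```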
### Monitor the CloudWatch logging output From d29585472168feb9d433c9d865467b56d8253e66 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Tue, 29 Oct 2024 11:00:36 -1000 Subject: [PATCH 04/28] Add app version to egressed events, run Flink 1.18 locally, and on Apple Silicon --- Dockerfile | 14 +++++++------- docker-compose.yml | 6 ++++++ .../example/stateful_functions/Configuration.java | 15 +++++++++++---- .../cloudevents/data/CartItemStatusDetails.java | 11 +++++++++++ .../function/cart/CartStatefulFunction.java | 2 ++ src/main/resources/application.properties | 1 + 6 files changed, 38 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index 08e6d8f..996b87c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,14 @@ -# The parent Flink image (flink:1.13.2-scala_2.12-java11) only contains the JRE (openjdk:11-jre), and it is missing key +# The parent Flink image (flink:1.18.1-java11) only contains the JRE (openjdk:11-jre), and it is missing key # diagnostic tools. This multistage build will overwrite the JRE with the JDK from openjdk:11 # See https://docs.docker.com/develop/develop-images/multistage-build/ -FROM openjdk:11 as jdk_image -FROM flink:1.16.2-java11 +FROM --platform=linux/amd64 openjdk:11 AS jdk_image +FROM --platform=linux/amd64 flink:1.18.1-java11 # Copy the JDK from the jdk_image -COPY --from=jdk_image /usr/local/openjdk-11 /usr/local/openjdk-11 +COPY --from=jdk_image /usr/local/openjdk-11 /opt/java/openjdk/ -RUN sed -i -e 's/^.*networkaddress.cache.ttl=.*$/networkaddress.cache.ttl=30/g' /usr/local/openjdk-11/conf/security/java.security -RUN sed -i -e 's/^.*networkaddress.cache.negative.ttl=.*$/networkaddress.cache.negative.ttl=10/g' /usr/local/openjdk-11/conf/security/java.security +RUN sed -i -e 's/^.*networkaddress.cache.ttl=.*$/networkaddress.cache.ttl=30/g' /opt/java/openjdk/conf/security/java.security +RUN sed -i -e 's/^.*networkaddress.cache.negative.ttl=.*$/networkaddress.cache.negative.ttl=10/g' /opt/java/openjdk/conf/security/java.security # The 2019 AWS rds root cert ADD rds-ca-2019-root.pem /etc/rds-ca-2019-root.pem @@ -43,7 +43,7 @@ RUN mkdir -p $FLINK_JOB_DIR COPY target/my-stateful-functions-embedded-java-3.3.0.jar ${FLINK_JOB_DIR}/flink-job.jar RUN chown -R flink:flink ${FLINK_JOB_DIR}/ -ENV PLUGIN_NAME flink-s3-fs-hadoop-1.16.2 +ENV PLUGIN_NAME flink-s3-fs-hadoop-1.18.1 RUN mkdir -p "${FLINK_HOME}/plugins/${PLUGIN_NAME}" RUN ln -fs "${FLINK_HOME}/opt/${PLUGIN_NAME}.jar" "${FLINK_HOME}/plugins/${PLUGIN_NAME}" diff --git a/docker-compose.yml b/docker-compose.yml index 325ef6d..608de74 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,7 @@ services: localstack: image: localstack/localstack:3.0.2 + platform: linux/amd64 profiles: [kinesis,statefun,all] ports: - "4566:4566" @@ -18,6 +19,7 @@ services: create-streams: image: amazon/aws-cli + platform: linux/amd64 profiles: [kinesis,statefun,all] depends_on: - localstack @@ -41,6 +43,7 @@ services: " jobmanager: + platform: linux/amd64 profiles: [statefun,all] depends_on: - create-streams @@ -73,6 +76,7 @@ services: - ./docker-mounts/savepoints:/savepoints taskmanager: + platform: linux/amd64 profiles: [statefun,all] depends_on: - jobmanager @@ -104,6 +108,7 @@ services: send-events: image: amazon/aws-cli + platform: linux/amd64 profiles: [send-events,all] volumes: - /var/run/docker.sock:/var/run/docker.sock @@ -126,6 +131,7 @@ services: get-egress-events: image: amazon/aws-cli + platform: linux/amd64 profiles: [get-egress-events,all] volumes: - 
/var/run/docker.sock:/var/run/docker.sock diff --git a/src/main/java/com/example/stateful_functions/Configuration.java b/src/main/java/com/example/stateful_functions/Configuration.java index b83cdd6..7647bdc 100644 --- a/src/main/java/com/example/stateful_functions/Configuration.java +++ b/src/main/java/com/example/stateful_functions/Configuration.java @@ -1,6 +1,7 @@ package com.example.stateful_functions; import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime; +import org.apache.flink.kinesis.shaded.com.amazonaws.services.dynamodbv2.xspec.S; import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -25,6 +26,7 @@ public class Configuration { public static boolean USE_ENHANCED_FANOUT = properties.getOrDefault("USE_ENHANCED_FANOUT", "true").equals("true"); public static String ENHANCED_FANOUT_NAME = properties.getOrDefault("ENHANCED_FANOUT_NAME", "example-enhanced-fanout").toString(); + public static String APP_VERSION = properties.getOrDefault("app.version", "0.1").toString(); public static final AwsRegion getAwsRegion() { @@ -52,13 +54,18 @@ public static final AwsRegion getAwsRegion() { } } - private static final Properties getProperties() { + private static Properties getProperties() { // System.getProperties + System.getenv() - Properties properties = System.getProperties(); - Map env = System.getenv(); + Properties properties = new Properties(); + try { + properties.load(Configuration.class.getResourceAsStream("/application.properties")); + } catch (Exception x) { + LOG.warn(x.getMessage(), x); + } - properties.putAll(env); + properties.putAll(System.getProperties()); + properties.putAll(System.getenv()); // If deployed in AWS Managed Flink, then get our config from // KinesisAnalyticsRuntime.getApplicationProperties().get("StatefunApplicationProperties") diff --git a/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java b/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java index 97d3c32..3921a31 100644 --- a/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java +++ b/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java @@ -8,6 +8,7 @@ public class CartItemStatusDetails { private int quantity; private BigDecimal currentPrice; private ProductAvailability availability; + private String version; // version of the app from application.properties public CartItemStatusDetails() { } @@ -18,6 +19,7 @@ private CartItemStatusDetails(Builder builder) { quantity = builder.quantity; currentPrice = builder.currentPrice; availability = builder.availability; + version = builder.version; } public String getProductId() { @@ -40,6 +42,9 @@ public ProductAvailability getAvailability() { return availability; } + public String getVersion() { + return version; + } public static final class Builder { private String productId; @@ -47,6 +52,7 @@ public static final class Builder { private int quantity; private BigDecimal currentPrice; private ProductAvailability availability; + private String version; public Builder() { } @@ -76,6 +82,11 @@ public Builder availability(ProductAvailability val) { return this; } + public Builder version(String val) { + version = val; + return this; + } + public CartItemStatusDetails build() { return new CartItemStatusDetails(this); } diff --git a/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java 
b/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java index 0b27300..b4ba427 100644 --- a/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java +++ b/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java @@ -1,6 +1,7 @@ package com.example.stateful_functions.function.cart; +import com.example.stateful_functions.Configuration; import com.example.stateful_functions.cloudevents.ExampleCloudEventType; import com.example.stateful_functions.cloudevents.data.CartItemStatusDetails; import com.example.stateful_functions.cloudevents.data.CartProductEventDetails; @@ -158,6 +159,7 @@ private void egressCartStatus(Context context, CartStateDetails cartState) { .originPrice(itemStateDetails.getOriginPrice()) .currentPrice(itemStateDetails.getPrice()) .availability(ProductAvailability.valueOf(itemStateDetails.getAvailability().name())) + .version(Configuration.APP_VERSION) .build() ); diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index d52783c..77cc591 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -1,2 +1,3 @@ # Reduce noise, primarily for tests spring.main.banner-mode=off +app.version=1.0 From fed64c56f43cae2913f2b4ff407bb164d253f34a Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 4 Dec 2024 14:42:24 -1000 Subject: [PATCH 05/28] Updated to show how to provision AWS Managed Flink via crossplane --- README.md | 201 +++++++- .../managed-flink-poc-bucket.yaml | 0 .../managed-flink-poc.yaml | 0 .../poc-get-events.sh | 0 .../poc-send-events.sh | 2 +- .../poc-tail-logs.sh | 0 aws-crossplane/NOTES.md | 54 ++ aws-crossplane/claims/demo-setup-claims.yaml | 41 ++ .../claims/managed-flink-claim.yaml | 26 + .../local/aws/cert-creation/job.yaml | 57 +++ aws-crossplane/local/aws/crossplane.yaml | 35 ++ .../local/aws/manifests/aws-services.yaml | 64 +++ aws-crossplane/local/aws/manifests/core.yaml | 86 ++++ .../local/aws/manifests/credentials.yaml | 13 + .../patch-and-transform-function.yaml | 7 + .../local/aws/manifests/provider-config.yaml | 14 + .../local/aws/manifests/secret.yaml | 8 + aws-crossplane/local/aws/providers.yaml | 19 + .../local/aws/update_credentials.sh | 24 + .../local/localstack/cert-creation/job.yaml | 57 +++ .../local/localstack/configs/function.yaml | 8 + .../localstack/configs/local-secret.yaml | 11 + .../configs/provider-config-localstack.yaml | 37 ++ .../local/localstack/configs/providers.yaml | 24 + .../local/localstack/configs/services.yaml | 53 ++ .../local/localstack/crossplane-configs.yaml | 19 + .../local/localstack/crossplane.yaml | 33 ++ .../local/localstack/localstack.yaml | 24 + aws-crossplane/poc-get-events.sh | 10 + aws-crossplane/poc-send-events.sh | 17 + aws-crossplane/poc-tail-logs.sh | 40 ++ aws-crossplane/resources/flink/README.md | 4 + .../resources/flink/flink-basic-comp.yaml | 262 ++++++++++ .../flink/flink-basic-example-claim.yaml | 25 + .../resources/flink/flink-lambda-comp.yaml | 464 ++++++++++++++++++ .../flink/flink-lambda-example-claim.yaml | 26 + aws-crossplane/resources/flink/flink-xrd.yaml | 61 +++ .../kinesis/kinesis-stream-comp.yaml | 71 +++ .../kinesis/kinesis-stream-example-claim.yaml | 15 + .../resources/kinesis/kinesis-stream-xrd.yaml | 61 +++ aws-crossplane/resources/s3/index.js | 8 + .../resources/s3/s3-bucket-comp.yaml | 44 ++ .../resources/s3/s3-bucket-example-claim.yaml | 8 + .../resources/s3/s3-bucket-xrd.yaml | 35 ++ 
.../resources/s3/s3-object-comp.yaml | 60 +++ .../resources/s3/s3-object-example-claim.yaml | 20 + .../resources/s3/s3-object-xrd.yaml | 43 ++ aws-crossplane/start-flink-lambda/.gitignore | 4 + aws-crossplane/start-flink-lambda/README.md | 10 + .../build_start_flink_py_zip.sh | 11 + .../start-flink-lambda/start_flink.py | 48 ++ 51 files changed, 2256 insertions(+), 8 deletions(-) rename managed-flink-poc-bucket.yaml => aws-cloudformation/managed-flink-poc-bucket.yaml (100%) rename managed-flink-poc.yaml => aws-cloudformation/managed-flink-poc.yaml (100%) rename poc-get-events.sh => aws-cloudformation/poc-get-events.sh (100%) rename poc-send-events.sh => aws-cloudformation/poc-send-events.sh (75%) rename poc-tail-logs.sh => aws-cloudformation/poc-tail-logs.sh (100%) create mode 100644 aws-crossplane/NOTES.md create mode 100644 aws-crossplane/claims/demo-setup-claims.yaml create mode 100644 aws-crossplane/claims/managed-flink-claim.yaml create mode 100644 aws-crossplane/local/aws/cert-creation/job.yaml create mode 100644 aws-crossplane/local/aws/crossplane.yaml create mode 100644 aws-crossplane/local/aws/manifests/aws-services.yaml create mode 100644 aws-crossplane/local/aws/manifests/core.yaml create mode 100644 aws-crossplane/local/aws/manifests/credentials.yaml create mode 100644 aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml create mode 100644 aws-crossplane/local/aws/manifests/provider-config.yaml create mode 100644 aws-crossplane/local/aws/manifests/secret.yaml create mode 100644 aws-crossplane/local/aws/providers.yaml create mode 100755 aws-crossplane/local/aws/update_credentials.sh create mode 100644 aws-crossplane/local/localstack/cert-creation/job.yaml create mode 100644 aws-crossplane/local/localstack/configs/function.yaml create mode 100644 aws-crossplane/local/localstack/configs/local-secret.yaml create mode 100644 aws-crossplane/local/localstack/configs/provider-config-localstack.yaml create mode 100644 aws-crossplane/local/localstack/configs/providers.yaml create mode 100644 aws-crossplane/local/localstack/configs/services.yaml create mode 100644 aws-crossplane/local/localstack/crossplane-configs.yaml create mode 100644 aws-crossplane/local/localstack/crossplane.yaml create mode 100644 aws-crossplane/local/localstack/localstack.yaml create mode 100755 aws-crossplane/poc-get-events.sh create mode 100755 aws-crossplane/poc-send-events.sh create mode 100755 aws-crossplane/poc-tail-logs.sh create mode 100644 aws-crossplane/resources/flink/README.md create mode 100644 aws-crossplane/resources/flink/flink-basic-comp.yaml create mode 100644 aws-crossplane/resources/flink/flink-basic-example-claim.yaml create mode 100644 aws-crossplane/resources/flink/flink-lambda-comp.yaml create mode 100644 aws-crossplane/resources/flink/flink-lambda-example-claim.yaml create mode 100644 aws-crossplane/resources/flink/flink-xrd.yaml create mode 100644 aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml create mode 100644 aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml create mode 100644 aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml create mode 100644 aws-crossplane/resources/s3/index.js create mode 100644 aws-crossplane/resources/s3/s3-bucket-comp.yaml create mode 100644 aws-crossplane/resources/s3/s3-bucket-example-claim.yaml create mode 100644 aws-crossplane/resources/s3/s3-bucket-xrd.yaml create mode 100644 aws-crossplane/resources/s3/s3-object-comp.yaml create mode 100644 aws-crossplane/resources/s3/s3-object-example-claim.yaml create 
mode 100644 aws-crossplane/resources/s3/s3-object-xrd.yaml create mode 100644 aws-crossplane/start-flink-lambda/.gitignore create mode 100644 aws-crossplane/start-flink-lambda/README.md create mode 100755 aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh create mode 100644 aws-crossplane/start-flink-lambda/start_flink.py diff --git a/README.md b/README.md index f4cacee..892fe73 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,41 @@ building and installing Apache Flink Stateful Functions compatible with Flink 1. ./mvnw test ``` +## Running the project via Docker Compose + +Follow the instructions below to run the project via Docker Compose. Note that Kinesis support is provided +by a [localstack](https://www.localstack.cloud/) container. + +The demo works using three docker compose "profiles" (phases). +1. In the first phase, the flink cluster running our stateful function application is started, + along with localstack, and an aws-cli container that creates the ingress and egress Kinesis streams. +2. The second phase runs an aws-cli container to send events to the ingress stream. The events + sent are from [product-cart-integration-test-events.jsonl](./src/test/resources/product-cart-integration-test-events.jsonl) +3. The third phase runs an aws-cli container to fetch the events from the egress stream and output them to the console. +```shell +# Build this project and create the jar file +./mvnw package + +# Build the flink docker images, and re-run these if code changes have been made +docker compose build jobmanager +docker compose build taskmanager + +# The statefun profile starts localstack, creates the kinesis streams, and starts the Flink jobmanager and taskmanager +docker compose --profile statefun up -d + +# Optionally connect the IDE debugger to the taskmanager on localhost port 5066 at this point + +# Send some events +docker compose --profile send-events up + +# Get and display the events from the egress stream +# Note that some VPNs (i.e., ZScaler) can cause failures with 'yum'. The workaround is to disconnect from the VPN first. +docker compose --profile get-egress-events up + +# Shut everything down +docker compose --profile all down +``` + ## Running the project via AWS Managed Flink ### Version compatibility between AWS Managed Flink and Stateful Functions @@ -115,25 +150,34 @@ branch, and build/install it locally via `mvn install` mvn package ``` -### Create an S3 bucket and upload this project's JAR file +The demo can be provisioned in AWS in two ways... via CloudFormation or Crossplane -To create the bucket, create a CloudFormation stack named `managed-flink-code-bucket` as defined [here](./managed-flink-poc-bucket.yaml), +### Provisioning via AWS CloudFormation + +The templates and scripts used for provisioning the AWS resources via CloudFormation are in the [aws-cloudformation](./aws-cloudformation) directory. 
+```
+cd aws-cloudformation
+```
+
+#### Create an S3 bucket and upload this project's JAR file

-To create the bucket, create a CloudFormation stack named `managed-flink-code-bucket` as defined [here](./managed-flink-poc-bucket.yaml),
+To create the bucket, create a CloudFormation stack named `managed-flink-code-bucket` as defined [here](./aws-cloudformation/managed-flink-poc-bucket.yaml),
 and after that finishes, use the AWS CLI to upload the jar file:

 ```shell
 export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
-aws s3 cp target/my-stateful-functions-embedded-java-3.3.0.jar \
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \
     s3://managed-flink-poc-bucket-codebucket-${AWS_ACCOUNT_ID}/
 ```

-### Create the Kinesis streams, Managed Flink application, and related AWS Resources
+#### Create the Kinesis streams, Managed Flink application, and related AWS Resources

-Create a CloudFormation stack named `managed-flink-poc` as defined by the CloudFormation templates [here](./managed-flink-poc.yaml).
+Create a CloudFormation stack named `managed-flink-poc` as defined by the CloudFormation templates [here](./aws-cloudformation/managed-flink-poc.yaml).
 This stack includes a custom resource lambda that programmatically configures logging when the Flink application is created,
 and transitions the application from the Ready to Running state.

-### Monitor the CloudWatch logging output
+#### Monitor the CloudWatch logging output

 ```shell
 ./poc-tail-logs.sh
 ```
@@ -146,8 +190,151 @@ left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` d
 ./poc-send-events.sh
 ```

-### Get and display the events published to the egress stream
+#### Get and display the events published to the egress stream
 ```shell
 ./poc-get-events.sh
 ```

+### Provisioning via Crossplane
+
+#### Prerequisites:
+- Docker
+- idpbuilder (https://github.com/cnoe-io/idpbuilder)
+- kubectl
+- jq
+- python3
+
+#### Introduction
+This demo of provisioning via Crossplane is nowhere near production quality. It merely demonstrates that it is possible
+to provision and run an AWS Managed Flink application via crossplane. Many tasks normally performed via CI/CD must be
+completed manually as described below. The crossplane compositions currently use `function-patch-and-transform` instead
+of a custom composition function, and because of that, many things in the compositions remain hard-coded (AWS account
+number, region, ARNs in IAM roles, etc). In production systems, the lambda and related infrastructure that auto-starts
+the Flink application probably only needs to be installed once per AWS account, and as such those resources should be
+provisioned via a separate claim. Also, see my note below regarding the creation of a CloudWatch log group for the lambda.
+
+#### Instructions
+
+The files to run the crossplane demo are in the [aws-crossplane](./aws-crossplane) directory.
+
+##### Build the lambda handler package. What? A lambda?
+
+The [managed resource for creating AWS Managed Flink applications](https://marketplace.upbound.io/providers/upbound/provider-aws-kinesisanalyticsv2/v1.17.0/resources/kinesisanalyticsv2.aws.upbound.io/Application/v1beta1)
+will do most of the work to get the Flink application provisioned, but if nothing else is done the application will
+become 'Ready', and not 'Running'. Additional resources are required to auto-run the Flink app... namely a lambda that will
+invoke an API call to start the application. This follows how it works when provisioning via CloudFormation.
+In CloudFormation though, the Lambda code can be inlined in a CloudFormation template, but in Crossplane the Lambda code must be
+referenced separately, e.g., via reference to the lambda package in an S3 file.
+
+Build the lambda package by following [the instructions here](./aws-crossplane/start-flink-lambda/README.md). The resulting Zip file will be
+uploaded to S3 later, as you follow the steps below.
+
+The lambda will be provisioned along with AWS Managed Flink via a single claim, below.
+
+##### Create the CloudWatch log group for the lambda
+Log in to AWS Identity Center and launch the web console for the Sandbox account.
+
+Confirm the existence of, and create if necessary, the CloudWatch log group `/aws/lambda/flink-demo2-starter`. I can't
+figure out how to do this using the managed resource provided by `provider-aws-cloudwatchlogs` because the log group
+for the lambda must be named exactly that, the MR doesn't provide a way to set the name explicitly, and k8s/crossplane
+doesn't like the slashes in `metadata.name`.
+
+##### Start the local IDP configured to use AWS
+```
+cd aws-crossplane
+```
+Log in to AWS Identity Center, and copy the AWS environment variable commands from the IL Sandbox account, Access Keys page.
+
+Paste and execute the AWS environment variable commands, then run this script:
+
+```
+./local/aws/update_credentials.sh
+```
+
+Launch the local IDP using idpbuilder (https://github.com/cnoe-io/idpbuilder)
+
+```
+idpbuilder create -p ./local/aws
+```
+
+The `idpbuilder create` command takes a few minutes to complete, and even then it will take more time for crossplane to start and the providers to be loaded.
+
+Wait for the AWS providers to finish loading...
+
+```
+kubectl -n crossplane-system get pods | grep provider-aws
+```
+
+Wait until the command above returns a list of pods all in the `Running` state.
+
+##### Install the Crossplane resources (XRDs and Compositions)
+Install the Composite Resource Definitions and Compositions required by the demo. Ignore the warnings issued by the following command:
+
+```
+for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do kubectl apply -f $i; done
+```
+
+At the time of this writing the demo does not utilize a custom composition function. Instead, it uses the off-the-shelf function `function-patch-and-transform` which gets loaded during IDP creation, above.
+
+##### Provision AWS Managed Flink via Crossplane claims
+
+Provision the S3 bucket and Kinesis streams...
+```
+kubectl apply -f claims/demo-setup-claims.yaml
+```
+
+Wait for the resources to become synchronized and ready by checking the output of the following command:
+```
+kubectl get managed
+```
+The output of `kubectl get managed` will reveal the actual S3 bucket name under `EXTERNAL-NAME`.
+
+Return to AWS Identity Center and launch the web console for the Sandbox account.
+
+Visit the S3 services page. Find the S3 bucket (flink-demo-bucket-*) and upload the following files to the bucket:
+- `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code)
+- `start-flink-lambda/start_flink_py.zip` (Lambda handler code which transitions the Managed Flink instance to the 'Running' state)
+
+Alternatively, use the AWS CLI to upload the files to the bucket (replace `XXXXX` with the bucket's unique suffix)...
+```
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://flink-demo-bucket-XXXXX/my-stateful-functions-embedded-java-3.3.0.jar
+aws s3 cp start-flink-lambda/start_flink_py.zip s3://flink-demo-bucket-XXXXX/start_flink_py.zip
+```
+
+##### Provision the Managed Flink application
+
+Applying the following claim will trigger the creation of the Flink application, its role, and log groups. Note that the Flink application will become 'Ready' but will not run on its own. Additional resources are required to auto-run the Flink app... a lambda for handling EventBridge events from the Flink application, an EventBridge rule and trigger to invoke the lambda, an IAM role allowing the lambda to make API calls to observe and control the Flink app, plus a permission for the EventBridge rule to invoke the lambda as a target. When the lambda sees that the Flink application is in the Ready state, it will invoke an API call to start the application.
+
+```
+kubectl apply -f claims/managed-flink-claim.yaml
+```
+
+Wait until the Flink application is in the 'Running' state, then execute the following commands to send events and see the results:
+
+```
+# Send the test events
+./poc-send-events.sh
+
+# Fetch and display the results from the egress stream
+./poc-get-events.sh
+```
+
+#### Cleanup
+
+```
+kubectl delete -f claims/managed-flink-claim.yaml
+kubectl delete -f claims/demo-setup-claims.yaml
+```
+
+Visit the S3 bucket in the web console and delete the files in the bucket. Issuing the `kubectl delete` command on the demo setup claims triggers the bucket to be deleted automatically soon after it is emptied.
+
+Shut down the local IDP with the command:
+```
+idpbuilder delete
+```
+
+Manually remove the CloudWatch log group `/aws/lambda/flink-demo2-starter`.
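+
+Both cleanup steps can also be done from the CLI. A sketch, assuming the same bucket naming as
+above (replace `XXXXX` with the bucket's unique suffix):
+
+```shell
+# Empty the code bucket so Crossplane can finish deleting it
+aws s3 rm s3://flink-demo-bucket-XXXXX --recursive
+
+# Remove the manually created log group for the starter lambda
+aws logs delete-log-group --log-group-name /aws/lambda/flink-demo2-starter
+```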
+
diff --git a/managed-flink-poc-bucket.yaml b/aws-cloudformation/managed-flink-poc-bucket.yaml
similarity index 100%
rename from managed-flink-poc-bucket.yaml
rename to aws-cloudformation/managed-flink-poc-bucket.yaml
diff --git a/managed-flink-poc.yaml b/aws-cloudformation/managed-flink-poc.yaml
similarity index 100%
rename from managed-flink-poc.yaml
rename to aws-cloudformation/managed-flink-poc.yaml
diff --git a/poc-get-events.sh b/aws-cloudformation/poc-get-events.sh
similarity index 100%
rename from poc-get-events.sh
rename to aws-cloudformation/poc-get-events.sh
diff --git a/poc-send-events.sh b/aws-cloudformation/poc-send-events.sh
similarity index 75%
rename from poc-send-events.sh
rename to aws-cloudformation/poc-send-events.sh
index 6ff5744..9d874ed 100755
--- a/poc-send-events.sh
+++ b/aws-cloudformation/poc-send-events.sh
@@ -2,7 +2,7 @@

 stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep ManagedFlinkIngressStream)

-grep -v test.action src/test/resources/product-cart-integration-test-events.jsonl | while read line; do
+grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do
   partkey=$(echo $line | md5sum | awk '{print $1}')
   data=$(echo $line | base64)
   cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data"
   echo $cmd
   eval $cmd
 done
diff --git a/poc-tail-logs.sh b/aws-cloudformation/poc-tail-logs.sh
similarity index 100%
rename from poc-tail-logs.sh
rename to aws-cloudformation/poc-tail-logs.sh
diff --git a/aws-crossplane/NOTES.md b/aws-crossplane/NOTES.md
new file mode 100644
index 0000000..779558f
--- /dev/null
+++ b/aws-crossplane/NOTES.md
@@ -0,0 +1,54 @@
+
+I initially put the stream ARN values in the environment section of [the managed flink claim](./claims/managed-flink-claim.yaml).
+Just the plain stream names are required; however, after updating the values in the claim and applying the change, I see this
+error in the output of `kubectl describe application.kinesisanalyticsv2.aws.upbound.io/flink-demo2-application`...
+
+```
+Warning CannotUpdateExternalResource 4m19s (x14 over 6m31s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application
+(combined from similar events): async update failed: failed to update the resource: [{0 updating Kinesis Analytics v2 Application
+(arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application): operation error Kinesis Analytics V2:
+UpdateApplication, https response error StatusCode: 400, RequestID: 39586af4-c1cc-4515-b818-c86f8f176671,
+InvalidApplicationConfigurationException: Failed to take snapshot for the application flink-demo2-application at this moment.
+The application is currently experiencing downtime. Please check the application's CloudWatch metrics or CloudWatch
+logs for any possible errors and retry the request. You can also retry the request after disabling the snapshots in
+the Kinesis Data Analytics console or by updating the ApplicationSnapshotConfiguration through the AWS SDK.
[]}]
+```
+
+It appears that the snapshot issue is preventing the update that would fix the snapshot issue :(
+
+I then tried to delete the claim and re-apply it as soon as the managed resources disappeared and the Flink app no longer
+showed in the AWS console, but the new app got stuck on this:
+
+```
+Warning CannotCreateExternalResource 51s (x39 over 4m41s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application
+(combined from similar events): async create failed: failed to create the resource: [{0 creating Kinesis Analytics v2 Application
+(flink-demo2-application): operation error Kinesis Analytics V2: CreateApplication, https response error StatusCode: 400,
+RequestID: 64366786-9f40-440f-8fcd-c3376f0cc619, ConcurrentModificationException: Tags are already registered for this
+resource ARN: arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application, please retry later.
+Or you can create without tags and then add tags using TagResource API after successful resource creation. []}]
+```
+
+Third try after waiting longer between delete and apply...
+
+```
+Normal UpdatedExternalResource 99s (x2 over 5m18s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application Successfully requested update of external resource
+```
+
+But in the AWS Console, the app seems stuck with the 'Updating' status. OK, waited a bit and it's now 'Running', except...
+
+```
+{
+  "applicationARN": "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application",
+  "applicationVersionId": "3",
+  "locationInformation": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException(MultiObjectDeleteSupport.java:107)",
+  "logger": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport",
+  "message": "AccessDenied: b97af75851aadd301cb2f64ad11c0ef0-516535517513-1733357577755/: User: arn:aws:sts::695788120607:assumed-role/AWSKinesisAnalyticsKubern-S3CustomerAppStateAccess-LZ083MW7K490/FlinkApplicationStateSession is not authorized to perform: s3:DeleteObject on resource: \"arn:aws:s3:::cc75a9b61f353980b2f0360aaee434149a950968/b97af75851aadd301cb2f64ad11c0ef0-516535517513-1733357577755/\" because no session policy allows the s3:DeleteObject action\n",
+  "messageSchemaVersion": "1",
+  "messageType": "WARN",
+  "threadName": "s3a-transfer-cc75a9b61f353980b2f0360aaee434149a950968-unbounded-pool2-t16"
+}
+```
+
+And we're back on 'Updating' status w/o doing anything except to go look at the logs, where I saw an error about not
+having permissions to delete objects from S3, and while I was typing this it went back to 'Running' status.
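+
+For reference, the workaround suggested in the first error message can be scripted. An untested
+sketch (the conditional token has to come from a fresh describe-application call):
+
+```shell
+# Disable snapshots via the UpdateApplication API so a stuck update can proceed
+TOKEN=$(aws kinesisanalyticsv2 describe-application \
+  --application-name flink-demo2-application \
+  --query 'ApplicationDetail.ConditionalToken' --output text)
+aws kinesisanalyticsv2 update-application \
+  --application-name flink-demo2-application \
+  --conditional-token "$TOKEN" \
+  --application-configuration-update \
+  '{"ApplicationSnapshotConfigurationUpdate":{"SnapshotsEnabledUpdate":false}}'
+```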
diff --git a/aws-crossplane/claims/demo-setup-claims.yaml b/aws-crossplane/claims/demo-setup-claims.yaml new file mode 100644 index 0000000..23761d4 --- /dev/null +++ b/aws-crossplane/claims/demo-setup-claims.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: kellinwood.com/v1alpha1 +kind: S3Bucket +metadata: + name: flink-demo-bucket + namespace: default +spec: + resourceConfig: + region: us-east-2 +--- +apiVersion: kellinwood.com/v1alpha1 +kind: KinesisStream +metadata: + name: flink-demo-ingress + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-demo-ingress + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: ken.ellinwood@imaginelearning.com + purpose: statefun-ingress +--- +apiVersion: kellinwood.com/v1alpha1 +kind: KinesisStream +metadata: + name: flink-demo-egress + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-demo-egress + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: ken.ellinwood@imaginelearning.com + purpose: statefun-egress diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml new file mode 100644 index 0000000..3f386d4 --- /dev/null +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -0,0 +1,26 @@ +apiVersion: kellinwood.com/v1alpha1 +kind: ManagedFlink +metadata: + name: flink-demo2 + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-demo2 + codeBucket: flink-demo-bucket + codeFile: my-stateful-functions-embedded-java-3.3.0.jar + runtime: FLINK-1_18 + parallelism: 1 + environmentProperties: + - propertyGroup: + - propertyGroupId: StatefunApplicationProperties + propertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress + AWS_REGION: us-east-2 + FOO: bar + compositionSelector: + matchLabels: + appReadyHandler: lambda + + diff --git a/aws-crossplane/local/aws/cert-creation/job.yaml b/aws-crossplane/local/aws/cert-creation/job.yaml new file mode 100644 index 0000000..bcd1e68 --- /dev/null +++ b/aws-crossplane/local/aws/cert-creation/job.yaml @@ -0,0 +1,57 @@ +# Get the certificate and create a configmap +apiVersion: batch/v1 +kind: Job +metadata: + name: cert-job + annotations: + argocd.argoproj.io/hook: PreSync +spec: + template: + spec: + serviceAccountName: cert-sa + containers: + - name: cert-container + image: bitnami/kubectl:1.30.3 + command: ["sh", "-c"] + args: + - |- + sleep 20 + cert=$(kubectl get secret -n default idpbuilder-cert -o go-template='{{range $k,$v := .data}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}') + kubectl create configmap cert -n crossplane-system --from-literal=ca.crt="$cert" || echo "failed to create configmap" + restartPolicy: Never +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-role + annotations: + argocd.argoproj.io/hook: PreSync +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "describe"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-role +subjects: +- kind: ServiceAccount + name: cert-sa + namespace: 
crossplane-system \ No newline at end of file diff --git a/aws-crossplane/local/aws/crossplane.yaml b/aws-crossplane/local/aws/crossplane.yaml new file mode 100644 index 0000000..5f1c2e1 --- /dev/null +++ b/aws-crossplane/local/aws/crossplane.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane + namespace: argocd + labels: + env: dev + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + sources: + - repoURL: 'https://charts.crossplane.io/stable' + targetRevision: 1.17.1 + helm: + releaseName: crossplane + values: | + args: + - "--enable-environment-configs" + registryCaBundleConfig: + name: "cert" + key: "ca.crt" + chart: crossplane + - repoURL: cnoe://cert-creation + targetRevision: HEAD + path: "." + destination: + server: 'https://kubernetes.default.svc' + namespace: crossplane-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/aws-services.yaml b/aws-crossplane/local/aws/manifests/aws-services.yaml new file mode 100644 index 0000000..14107af --- /dev/null +++ b/aws-crossplane/local/aws/manifests/aws-services.yaml @@ -0,0 +1,64 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-s3 +spec: + package: xpkg.upbound.io/upbound/provider-aws-s3:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-iam +spec: + package: xpkg.upbound.io/upbound/provider-aws-iam:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesis +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesisanalyticsv2 +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchlogs +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchevents +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchevents:v1.17.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-lambda + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-lambda:v1.17.0 + controllerConfigRef: + name: aws-config \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/core.yaml b/aws-crossplane/local/aws/manifests/core.yaml new file mode 100644 index 0000000..8e1eca5 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/core.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: upbound-provider-family-aws +spec: + package: xpkg.upbound.io/upbound/provider-family-aws:v1.11.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-helm +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-helm:v0.14.0 + controllerConfigRef: + name: provider-helm +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: 
provider-kubernetes +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.7.0 + controllerConfigRef: + name: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-helm + namespace: crossplane-system +spec: + serviceAccountName: provider-helm +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-kubernetes + namespace: crossplane-system +spec: + serviceAccountName: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: aws-config +spec: + args: + - --debug + podSecurityContext: + fsGroup: 2000 + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret +--- +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: aws-provider + namespace: crossplane-system + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret +--- +apiVersion: kubernetes.crossplane.io/v1alpha1 +kind: ProviderConfig +metadata: + name: kubernetes-provider + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + source: InjectedIdentity \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/credentials.yaml b/aws-crossplane/local/aws/manifests/credentials.yaml new file mode 100644 index 0000000..9cc8785 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/credentials.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +stringData: + creds: | + [default] + aws_access_key_id=ASIAXQQ7KQFETOZQOKBW + aws_secret_access_key=4D5+go8Xu/ECTkhEvo1ElSQzsWGsPQhqHdtfErR0 + aws_session_token=IQoJb3JpZ2luX2VjEE8aCXVzLWVhc3QtMiJIMEYCIQDMk2rlsyaegjXW0veslAkZZLnLQF7D1QTdgZ9y6keuGwIhAJDRpIhgiFFmzJJgZt02YcQoJreR/u9M0H4XfG1VtlvEKqoDCPj//////////wEQABoMNTE2NTM1NTE3NTEzIgzdOexLddWNYoPpRpgq/gJIm5MoOmHKO1tLHDX12/sTDb/P4PVxq/7Weq75DQE7N7w4HHMGtTWv1TJmkkYqiNSs0m2l62XQq2zDMo3HB+QXTXnTmXeO/0d1fZtcmwn6mOUigs3G+Sa607St31bDKfBS6MqnCNCtQqdMHa612zjwivZ5b8cayFiHEimyiyeLli0XOYQheve3rvMHp6ZMU+DONHXNLO9WheMtdgwRGjRAW7TCxI/oXvIZnjLKoTX810sWv+xmJBF7a95ED/ScX5Y/PYwfLhX684KE0my+odSFz3MtJj6nEQgY2KGXKwu9apfRje6J0f1FHb5HmfIcxL45JEkY+o9NYY7vR8xsdSZPndgNXUVgR+cXnaV19ckFAkGYW0HauK15uKGDxkPtWTsc3mmGaQSsPJMZwK8hOja7OG8+HdemNCjzbRmZYsUdua3ZkQOdpItjxq1B6yt56VXlWrQIOa9C5C6oOuxyUhqIs5rT912+ZQbRWSEKG7Ac+cE97MTgp/4jHeTGnb8AMNm7w7oGOqUBPMiT81S3m87M3JAaX0BdHmlagCXqxpnvcgHHZklR99yhexuEEYb8E/TUFNvb1l0X0Nn991XSS+LHpjdmysDB5dYF0UWNjxogQuj5eTnF6rNX67f5srcMBQDyWgFbf1YU33eDMrR1g6AzcSYuGKH+aCsGg/RdcxPGTw7cKi3BQ6Pd3penTmk5qPAJDUVl/ZYWeVR5XhViIiGrKI6TV6kzZX0mQcky +kind: Secret +metadata: + creationTimestamp: null + name: aws-secret + namespace: crossplane-system \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml b/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml new file mode 100644 index 0000000..15c2626 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 diff --git a/aws-crossplane/local/aws/manifests/provider-config.yaml 
b/aws-crossplane/local/aws/manifests/provider-config.yaml new file mode 100644 index 0000000..9c8900d --- /dev/null +++ b/aws-crossplane/local/aws/manifests/provider-config.yaml @@ -0,0 +1,14 @@ +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: provider-aws + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/secret.yaml b/aws-crossplane/local/aws/manifests/secret.yaml new file mode 100644 index 0000000..3b0bfd0 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/secret.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: init-secret + namespace: crossplane-system +stringData: + password: "password" \ No newline at end of file diff --git a/aws-crossplane/local/aws/providers.yaml b/aws-crossplane/local/aws/providers.yaml new file mode 100644 index 0000000..0e9694b --- /dev/null +++ b/aws-crossplane/local/aws/providers.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane-configs + namespace: argocd +spec: + destination: + namespace: crossplane-system + server: "https://kubernetes.default.svc" + source: + repoURL: cnoe://manifests + targetRevision: HEAD + path: "." + project: default + syncPolicy: + automated: + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/aws/update_credentials.sh b/aws-crossplane/local/aws/update_credentials.sh new file mode 100755 index 0000000..365103e --- /dev/null +++ b/aws-crossplane/local/aws/update_credentials.sh @@ -0,0 +1,24 @@ +#! /bin/bash + +# Update manifests/credentials.yaml with values from environment variables +cd $(dirname $0) + +required_vars="AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN" +for var in ${required_vars}; do + val=$(eval "echo \$$var") + if [ -z "$val" ]; then + echo "$var is not set" + exit 1 + fi +done + +git restore manifests/credentials.yaml + +cat manifests/credentials.yaml | \ + sed "s!aws_access_key_id=REPLACE!aws_access_key_id=$AWS_ACCESS_KEY_ID!" | \ + sed "s!aws_secret_access_key=REPLACE!aws_secret_access_key=$AWS_SECRET_ACCESS_KEY!" | \ + sed "s!aws_session_token=REPLACE!aws_session_token=$AWS_SESSION_TOKEN!" 
>manifests/credentials.yaml.tmp +mv manifests/credentials.yaml.tmp manifests/credentials.yaml + +echo "Run this command to clear env vars:" +echo "unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN" diff --git a/aws-crossplane/local/localstack/cert-creation/job.yaml b/aws-crossplane/local/localstack/cert-creation/job.yaml new file mode 100644 index 0000000..bcd1e68 --- /dev/null +++ b/aws-crossplane/local/localstack/cert-creation/job.yaml @@ -0,0 +1,57 @@ +# Get the certificate and create a configmap +apiVersion: batch/v1 +kind: Job +metadata: + name: cert-job + annotations: + argocd.argoproj.io/hook: PreSync +spec: + template: + spec: + serviceAccountName: cert-sa + containers: + - name: cert-container + image: bitnami/kubectl:1.30.3 + command: ["sh", "-c"] + args: + - |- + sleep 20 + cert=$(kubectl get secret -n default idpbuilder-cert -o go-template='{{range $k,$v := .data}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}') + kubectl create configmap cert -n crossplane-system --from-literal=ca.crt="$cert" || echo "failed to create configmap" + restartPolicy: Never +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-role + annotations: + argocd.argoproj.io/hook: PreSync +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "describe"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-role +subjects: +- kind: ServiceAccount + name: cert-sa + namespace: crossplane-system \ No newline at end of file diff --git a/aws-crossplane/local/localstack/configs/function.yaml b/aws-crossplane/local/localstack/configs/function.yaml new file mode 100644 index 0000000..9fac525 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/function.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform + namespace: crossplane-system +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 diff --git a/aws-crossplane/local/localstack/configs/local-secret.yaml b/aws-crossplane/local/localstack/configs/local-secret.yaml new file mode 100644 index 0000000..3044636 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/local-secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: local-secret + namespace: crossplane-system +stringData: + creds: | + [default] + aws_access_key_id = replaceme + aws_secret_access_key = replaceme + aws_session_token = replaceme \ No newline at end of file diff --git a/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml new file mode 100644 index 0000000..7275662 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml @@ -0,0 +1,37 @@ +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: provider-aws + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + source: Secret + secretRef: + namespace: crossplane-system + name: local-secret + key: creds + endpoint: + 
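+    # All of the services listed below are routed to the single LocalStack edge endpoint configured under url.static.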
services: + - dynamodb + - iam + - lambda + - s3 + - sqs + - sns + - kinesis + - firehose + - cloudwatch + - logs + - secretsmanager + - eventbridge + - kinesisanalyticsv2 + hostnameImmutable: true + url: + type: Static + static: http://localstack.localstack.svc.cluster.local:4566 + skip_credentials_validation: true + skip_metadata_api_check: true + skip_requesting_account_id: true + s3_use_path_style: true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/configs/providers.yaml b/aws-crossplane/local/localstack/configs/providers.yaml new file mode 100644 index 0000000..6a9aa43 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/providers.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-kubernetes +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.7.0 + controllerConfigRef: + name: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-kubernetes + namespace: crossplane-system +spec: + serviceAccountName: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: upbound-provider-family-aws +spec: + package: xpkg.upbound.io/upbound/provider-family-aws:v1.11.0 diff --git a/aws-crossplane/local/localstack/configs/services.yaml b/aws-crossplane/local/localstack/configs/services.yaml new file mode 100644 index 0000000..a41e3c7 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/services.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-s3 +spec: + package: xpkg.upbound.io/upbound/provider-aws-s3:v1.11.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchlogs +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-iam +spec: + package: xpkg.upbound.io/upbound/provider-aws-iam:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesis + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesisanalyticsv2 + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchevents + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchevents:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-lambda + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-lambda:v1.17.0 diff --git a/aws-crossplane/local/localstack/crossplane-configs.yaml b/aws-crossplane/local/localstack/crossplane-configs.yaml new file mode 100644 index 0000000..0d57279 --- /dev/null +++ b/aws-crossplane/local/localstack/crossplane-configs.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane-configs + namespace: argocd +spec: + destination: + namespace: crossplane-system + server: "https://kubernetes.default.svc" + source: + repoURL: cnoe://configs + targetRevision: HEAD + path: "." 
+ project: default + syncPolicy: + automated: + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/crossplane.yaml b/aws-crossplane/local/localstack/crossplane.yaml new file mode 100644 index 0000000..f2e50c5 --- /dev/null +++ b/aws-crossplane/local/localstack/crossplane.yaml @@ -0,0 +1,33 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane + namespace: argocd + labels: + env: dev + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + sources: + - repoURL: 'https://charts.crossplane.io/stable' + targetRevision: 1.17.1 + helm: + releaseName: crossplane + values: | + registryCaBundleConfig: + name: "cert" + key: "ca.crt" + chart: crossplane + - repoURL: cnoe://cert-creation + targetRevision: HEAD + path: "." + destination: + server: 'https://kubernetes.default.svc' + namespace: crossplane-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/localstack.yaml b/aws-crossplane/local/localstack/localstack.yaml new file mode 100644 index 0000000..1f8d06b --- /dev/null +++ b/aws-crossplane/local/localstack/localstack.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: localstack + namespace: argocd + labels: + example: localstack-integration +spec: + project: default + source: + repoURL: https://localstack.github.io/helm-charts + targetRevision: 0.6.12 + chart: localstack + helm: + releaseName: localstack + destination: + server: "https://kubernetes.default.svc" + namespace: localstack + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/poc-get-events.sh b/aws-crossplane/poc-get-events.sh new file mode 100755 index 0000000..1baa5c0 --- /dev/null +++ b/aws-crossplane/poc-get-events.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-demo-egress) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +for encoded_data in $(aws kinesis get-records --shard-iterator $shard_iterator | jq -crM .Records[].Data); do + echo $encoded_data | base64 -d | jq . +done diff --git a/aws-crossplane/poc-send-events.sh b/aws-crossplane/poc-send-events.sh new file mode 100755 index 0000000..96d2706 --- /dev/null +++ b/aws-crossplane/poc-send-events.sh @@ -0,0 +1,17 @@ +#! /bin/bash + +set -e +MD5CMD=md5 + +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-demo-ingress) +if [ -z "$stream_name" ]; then + echo "Stream not found" + exit 1 +fi +grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do + partkey=$(echo $line | $MD5CMD | awk '{print $1}') + data=$(echo $line | base64) + cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data" + echo $cmd + eval $cmd +done diff --git a/aws-crossplane/poc-tail-logs.sh b/aws-crossplane/poc-tail-logs.sh new file mode 100755 index 0000000..0f120bf --- /dev/null +++ b/aws-crossplane/poc-tail-logs.sh @@ -0,0 +1,40 @@ +#! 
/bin/bash
+
+set -e
+
+cd $(dirname $0)
+
+NEXT_TOKEN_ARG=
+
+CWLOGS_DIR=.cwlogs
+mkdir -p $CWLOGS_DIR
+
+ITERATION=1
+
+if [ -f $CWLOGS_DIR/next.token ]; then
+    NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)"
+fi
+
+while true; do
+    CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json
+    aws logs get-log-events \
+        --start-from-head \
+        $NEXT_TOKEN_ARG \
+        --log-group-name flink-demo2-app-log-group \
+        --log-stream-name flink-demo2-app-log-stream \
+        >$CWLOG_FILE
+
+    NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken)
+    echo $NEXT_TOKEN >$CWLOGS_DIR/next.token
+    NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN"
+    EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length')
+
+    if [[ $EVENT_COUNT == 0 ]]; then
+        sleep 2
+        rm $CWLOG_FILE
+    else
+        cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log
+    fi
+
+    ITERATION=$((ITERATION + 1))
+done
diff --git a/aws-crossplane/resources/flink/README.md b/aws-crossplane/resources/flink/README.md
new file mode 100644
index 0000000..5573cd7
--- /dev/null
+++ b/aws-crossplane/resources/flink/README.md
@@ -0,0 +1,4 @@
+Two compositions are provided. The first, `flink-basic-comp.yaml`, creates a Managed Flink instance and an associated
+CloudWatch log group and stream. This composition results in a Managed Flink instance in the 'Ready' state. The second,
+`flink-lambda-comp.yaml`, goes further and also creates a lambda that observes the Managed Flink instance and automatically
+transitions it to the running state.
\ No newline at end of file
diff --git a/aws-crossplane/resources/flink/flink-basic-comp.yaml b/aws-crossplane/resources/flink/flink-basic-comp.yaml
new file mode 100644
index 0000000..215d879
--- /dev/null
+++ b/aws-crossplane/resources/flink/flink-basic-comp.yaml
@@ -0,0 +1,262 @@
+---
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  name: flinkbasic.kellinwood.com
+  labels:
+    appReadyHandler: none
+spec:
+  compositeTypeRef:
+    apiVersion: kellinwood.com/v1alpha1
+    kind: XManagedFlink
+  mode: Pipeline
+  pipeline:
+    - step: patch-and-transform
+      functionRef:
+        name: function-patch-and-transform
+      input:
+        apiVersion: pt.fn.crossplane.io/v1beta1
+        kind: Resources
+        resources:
+          - name: managed-flink-application
+            base:
+              apiVersion: kinesisanalyticsv2.aws.upbound.io/v1beta1
+              kind: Application
+              metadata:
+                annotations:
+                  meta.upbound.io/example-id: kinesisanalyticsv2/v1beta1/application
+                name: example
+              spec:
+                forProvider:
+                  applicationConfiguration:
+                    - applicationCodeConfiguration:
+                        - codeContentType: ZIPFILE
+                          codeContent:
+                            - s3ContentLocation:
+                                - fileKey: example-flink-application.jar
+                                  bucketArnSelector:
+                                    matchLabels:
+                                      crossplane.io/claim-name: example-bucket
+                      applicationSnapshotConfiguration:
+                        - snapshotsEnabled: true
+                      environmentProperties:
+                        - propertyGroup:
+                            - propertyGroupId: MyAppProperties
+                              propertyMap:
+                                FOO: bar
+                                AWS_REGION: us-west-1
+                      flinkApplicationConfiguration:
+                        - checkpointConfiguration:
+                            - checkpointInterval: 300000 # 5 mins * 60 secs/min * 1000 millis/sec
+                              checkpointingEnabled: true
+                              configurationType: CUSTOM
+                          monitoringConfiguration:
+                            - logLevel: INFO
+                              metricsLevel: TASK
+                              configurationType: CUSTOM
+                          parallelismConfiguration:
+                            - autoScalingEnabled: false
+                              parallelism: 2
+                              parallelismPerKpu: 1
+                              configurationType: CUSTOM
+                      runConfiguration:
+                        - applicationRestoreConfiguration:
+                            - applicationRestoreType: RESTORE_FROM_LATEST_SNAPSHOT #
RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT + # snapshotName: xyz # Specify this when restoreType = RESTORE_FROM_CUSTOM_SNAPSHOT + flinkRunConfiguration: + - allowNonRestoredState: false + applicationMode: STREAMING + cloudwatchLoggingOptions: + - logStreamArnSelector: + matchControllerRef: true + region: us-east-2 + runtimeEnvironment: FLINK-1_18 + startApplication: true + serviceExecutionRoleSelector: + matchLabels: + rolePurpose: flink-application + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-application" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.codeFile + toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].fileKey + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.codeBucket + toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].bucketArnSelector.matchLabels['crossplane.io/claim-name'] + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.runtime + toFieldPath: spec.forProvider.runtimeEnvironment + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.parallelism + toFieldPath: spec.forProvider.applicationConfiguration[0].flinkApplicationConfiguration[0].parallelismConfiguration[0].parallelism + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.environmentProperties + toFieldPath: spec.forProvider.applicationConfiguration[0].environmentProperties + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.managedFlinkArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.managedFlinkName + - name: managed-flink-role + base: + apiVersion: iam.aws.upbound.io/v1beta1 + kind: Role + metadata: + annotations: + meta.upbound.io/example-id: iam/v1beta1/role + labels: + rolePurpose: flink-application + name: example + spec: + forProvider: + assumeRolePolicy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "kinesisanalytics.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + managedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisFullAccess + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + inlinePolicy: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards" + ] + } + ] + } + - name: logs_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo-app-log-group" + ], + "Action": [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + } + ] + } + - name: metrics_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": "*", + 
"Action": [ + "cloudwatch:PutMetricData" + ] + } + ] + } + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-role" + - name: log-group + base: + apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 + kind: Group + metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta1/group + name: example + spec: + forProvider: + region: us-east-2 + retentionInDays: 7 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-group" + - name: log-stream + base: + apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 + kind: Stream + metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta1/stream + name: example + spec: + forProvider: + logGroupNameSelector: + matchControllerRef: true + name: example + region: us-east-2 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-stream" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-stream" + \ No newline at end of file diff --git a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml new file mode 100644 index 0000000..8b5355c --- /dev/null +++ b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml @@ -0,0 +1,25 @@ +apiVersion: kellinwood.com/v1alpha1 +kind: ManagedFlink +metadata: + name: flink-demo + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-demo + codeBucket: flink-demo-bucket + codeFile: my-stateful-functions-embedded-java-3.3.0.jar + runtime: FLINK-1_18 + parallelism: 1 + environmentProperties: + - propertyGroup: + - propertyGroupId: StatefunApplicationProperties + propertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress + AWS_REGION: us-east-2 + compositionSelector: + matchLabels: + appReadyHandler: none # Use the composition that doesn't provision a lambda function to handle the app ready signal + + diff --git a/aws-crossplane/resources/flink/flink-lambda-comp.yaml b/aws-crossplane/resources/flink/flink-lambda-comp.yaml new file mode 100644 index 0000000..c0f3f9f --- /dev/null +++ b/aws-crossplane/resources/flink/flink-lambda-comp.yaml @@ -0,0 +1,464 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: flinklambdastart.kellinwood.com + labels: + appReadyHandler: lambda +spec: + compositeTypeRef: + apiVersion: kellinwood.com/v1alpha1 + kind: XManagedFlink + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: managed-flink-application + base: + apiVersion: kinesisanalyticsv2.aws.upbound.io/v1beta1 + kind: Application + metadata: + annotations: + meta.upbound.io/example-id: kinesisanalyticsv2/v1beta1/application + name: 
example + spec: + forProvider: + applicationConfiguration: + - applicationCodeConfiguration: + - codeContentType: ZIPFILE + codeContent: + - s3ContentLocation: + - fileKey: example-flink-application.jar + bucketArnSelector: + matchLabels: + crossplane.io/claim-name: example-bucket + applicationSnapshotConfiguration: + - snapshotsEnabled: true + environmentProperties: + - propertyGroup: + - propertyGroupId: MyAppProperties + propertyMap: + FOO: bar + AWS_REGION: us-west-1 + flinkApplicationConfiguration: + - checkpointConfiguration: + - checkpointInterval: 300000 # 5 mins * 60 secs/min * 1000 millis/sec + checkpointingEnabled: true + configurationType: CUSTOM + monitoringConfiguration: + - logLevel: INFO + metricsLevel: TASK + configurationType: CUSTOM + parallelismConfiguration: + - autoScalingEnabled: false + parallelism: 2 + parallelismPerKpu: 1 + configurationType: CUSTOM + runConfiguration: + - applicationRestoreConfiguration: + - applicationRestoreType: RESTORE_FROM_LATEST_SNAPSHOT # RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT + # snapshotName: xyz # Specify this when restoreType = RESTORE_FROM_CUSTOM_SNAPSHOT + flinkRunConfiguration: + - allowNonRestoredState: false + applicationMode: STREAMING + cloudwatchLoggingOptions: + - logStreamArnSelector: + matchControllerRef: true + region: us-east-2 + runtimeEnvironment: FLINK-1_18 + startApplication: true + serviceExecutionRoleSelector: + matchLabels: + rolePurpose: flink-application + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-application" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.codeFile + toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].fileKey + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.codeBucket + toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].bucketArnSelector.matchLabels['crossplane.io/claim-name'] + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.runtime + toFieldPath: spec.forProvider.runtimeEnvironment + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.parallelism + toFieldPath: spec.forProvider.applicationConfiguration[0].flinkApplicationConfiguration[0].parallelismConfiguration[0].parallelism + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.environmentProperties + toFieldPath: spec.forProvider.applicationConfiguration[0].environmentProperties + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.managedFlinkArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.managedFlinkName + - name: managed-flink-role + base: + apiVersion: iam.aws.upbound.io/v1beta1 + kind: Role + metadata: + annotations: + meta.upbound.io/example-id: iam/v1beta1/role + labels: + rolePurpose: flink-application + name: example + spec: + forProvider: + assumeRolePolicy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "kinesisanalytics.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + managedPolicyArns: + - 
arn:aws:iam::aws:policy/AmazonKinesisFullAccess + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + inlinePolicy: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards" + ] + } + ] + } + - name: logs_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo2-app-log-group" + ], + "Action": [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + } + ] + } + - name: metrics_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": "*", + "Action": [ + "cloudwatch:PutMetricData" + ] + } + ] + } + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-role" + - name: log-group + base: + apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 + kind: Group + metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta1/group + name: example + spec: + forProvider: + region: us-east-2 + retentionInDays: 7 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-group" + - name: log-stream + base: + apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 + kind: Stream + metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta1/stream + name: example + spec: + forProvider: + logGroupNameSelector: + matchControllerRef: true + name: example + region: us-east-2 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-stream" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.name + transforms: + - type: string + string: + type: Format + fmt: "%s-app-log-stream" + - name: starter-lambda + base: + apiVersion: lambda.aws.upbound.io/v1beta1 + kind: Function + metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta1/function + name: example + spec: + forProvider: + handler: start_flink.lambda_handler + packageType: Zip + region: us-east-2 + roleSelector: + matchLabels: + rolePurpose: starter-lambda + runtime: python3.10 + s3BucketSelector: + matchLabels: + crossplane.io/claim-name: flink-demo-bucket + s3Key: start_flink_py.zip + timeout: 60 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-starter" + - name: starter-lambda-role + base: + apiVersion: iam.aws.upbound.io/v1beta1 + kind: Role + metadata: + annotations: + meta.upbound.io/example-id: iam/v1beta1/role + labels: + rolePurpose: starter-lambda + name: example + spec: + forProvider: + assumeRolePolicy: | + { + 
"Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + inlinePolicy: + - name: flink_permissions + policy: |- + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application" + ], + "Action": [ + "kinesisanalytics:DescribeApplication", + "kinesisanalytics:StartApplication" + ] + } + ] + } + managedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisFullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-starter-role" + - name: starter-lambda-invoke-perms + base: + apiVersion: lambda.aws.upbound.io/v1beta1 + kind: Permission + metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta1/permission + name: example-starter-permission + spec: + forProvider: + action: lambda:InvokeFunction + functionNameSelector: + matchControllerRef: true + principal: events.amazonaws.com + region: us-east-2 + sourceArn: arn:aws:events:us-east-2:516535517513:rule/example-eventbridge-rule + statementId: example-starter-permission + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-starter-permission" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.statementId + transforms: + - type: string + string: + type: Format + fmt: "%s-starter-permission" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.sourceArn + transforms: + - type: string + string: + type: Format + fmt: "arn:aws:events:us-east-2:516535517513:rule/%s-eventbridge-rule" + - name: eventbridge-rule + base: + apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 + kind: Rule + metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/rule + name: example-eventbridge-rule + spec: + forProvider: + description: Process Managed Flink events + eventBusName: default + eventPattern: | + { + "source": ["aws.kinesisanalytics"] + } + region: us-east-2 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-eventbridge-rule" + - name: eventbridge-target + base: + apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 + kind: Target + metadata: + name: example + spec: + forProvider: + arn: example + eventBusName: default + region: us-east-2 + ruleSelector: + matchControllerRef: true + targetId: example + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + transforms: + - type: string + string: + type: Format + fmt: "%s-eventbridge-target" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.targetId + transforms: + - type: string + string: + type: Format + fmt: "%s-eventbridge-target" + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: spec.forProvider.arn 
+ transforms: + - type: string + string: + type: Format + fmt: "arn:aws:lambda:us-east-2:516535517513:function:%s-starter" diff --git a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml new file mode 100644 index 0000000..dbc45ae --- /dev/null +++ b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml @@ -0,0 +1,26 @@ +apiVersion: kellinwood.com/v1alpha1 +kind: ManagedFlink +metadata: + name: flink-demo2 + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-demo2 + codeBucket: flink-demo-bucket + codeFile: my-stateful-functions-embedded-java-3.3.0.jar + runtime: FLINK-1_18 + parallelism: 1 + environmentProperties: + - propertyGroup: + - propertyGroupId: StatefunApplicationProperties + propertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress + AWS_REGION: us-east-2 + FOO: bar + compositionSelector: + matchLabels: + appReadyHandler: lambda + + diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml new file mode 100644 index 0000000..0e2fe53 --- /dev/null +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -0,0 +1,61 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xmanagedflinks.kellinwood.com +spec: + group: kellinwood.com + names: + kind: XManagedFlink + plural: xmanagedflinks + claimNames: + kind: ManagedFlink + plural: managedflinks + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + properties: + resourceConfig: + properties: + region: + type: string + name: + type: string + codeBucket: + type: string + codeFile: + type: string + runtime: + type: string + parallelism: + type: number + environmentProperties: + type: array + items: + type: object + properties: + propertyGroup: + type: array + items: + type: object + properties: + propertyGroupId: + type: string + propertyMap: + type: object + additionalProperties: + type: string + type: object + type: object + status: + properties: + managedFlinkName: + type: string + managedFlinkArn: + type: string + type: object + type: object \ No newline at end of file diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml new file mode 100644 index 0000000..f47a412 --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: kinesisstreams.kellinwood.com +spec: + compositeTypeRef: + apiVersion: kellinwood.com/v1alpha1 + kind: XKinesisStream + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: kinesis-stream + base: + apiVersion: kinesis.aws.upbound.io/v1beta1 + kind: Stream + metadata: + annotations: + meta.upbound.io/example-id: kinesis/v1beta1/stream + name: example + labels: + foo: bar + spec: + forProvider: + region: us-east-2 + retentionPeriod: 24 + shardCount: 1 + shardLevelMetrics: + - IncomingBytes + - OutgoingBytes + streamModeDetails: + - streamMode: PROVISIONED + tags: + createdBy: unknown-person + purpose: unknown-purpose + providerConfigRef: + name: provider-aws + 
patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.retentionPeriod + toFieldPath: spec.forProvider.retentionPeriod + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.streamMode + toFieldPath: spec.forProvider.streamModeDetails[0].streamMode + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.shardCount + toFieldPath: spec.forProvider.shardCount + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.tags.createdBy + toFieldPath: spec.forProvider.tags.createdBy + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.tags.purpose + toFieldPath: spec.forProvider.tags.purpose + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.streamArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.streamName diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml new file mode 100644 index 0000000..84c3e26 --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml @@ -0,0 +1,15 @@ +apiVersion: kellinwood.com/v1alpha1 +kind: KinesisStream +metadata: + name: my-kinesis-stream + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: my-kinesis-stream + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: bart.simpson@the_simpsons.com + purpose: to demonstrate creation of a Kinesis stream \ No newline at end of file diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml new file mode 100644 index 0000000..9bbc9af --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml @@ -0,0 +1,61 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xkinesisstreams.kellinwood.com +spec: + group: kellinwood.com + names: + kind: XKinesisStream + plural: xkinesisstreams + claimNames: + kind: KinesisStream + plural: kinesisstreams + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + description: KinesisStreamSpec defines the desired state of the stream + properties: + resourceConfig: + description: ResourceConfig defines general properties of this AWS resource. 
+ properties: + region: + type: string + description: region for the stream, required + name: + type: string + streamMode: + type: string + enum: + - PROVISIONED + - ON_DEMAND + description: the capacity mode of the stream, must be PROVISIONED or ON_DEMAND + shardCount: + type: number + description: The number of shards when streamMode=PROVISIONED + retentionPeriod: + type: number + description: Retention period in hours, default is 24 + tags: + description: tags to apply to the stream + type: object + properties: + createdBy: + type: string + purpose: + type: string + type: object + type: object + status: + description: KinesisStreamStatus defines the observed state of stream + properties: + streamName: + type: string + streamArn: + type: string + type: object + type: object diff --git a/aws-crossplane/resources/s3/index.js b/aws-crossplane/resources/s3/index.js new file mode 100644 index 0000000..fc315fb --- /dev/null +++ b/aws-crossplane/resources/s3/index.js @@ -0,0 +1,8 @@ + exports.handler = async (event) => { + console.log(event); + const response = { + statusCode: 200, + event: event + }; + return response; + }; diff --git a/aws-crossplane/resources/s3/s3-bucket-comp.yaml b/aws-crossplane/resources/s3/s3-bucket-comp.yaml new file mode 100644 index 0000000..d544186 --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-comp.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: s3buckets.kellinwood.com +spec: + compositeTypeRef: + apiVersion: kellinwood.com/v1alpha1 + kind: XS3Bucket + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: s3-bucket + base: + apiVersion: s3.aws.upbound.io/v1beta1 + kind: Bucket + metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta1/bucket + name: example + spec: + forProvider: + region: us-east-2 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: metadata.name + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.bucketArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.bucketName diff --git a/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml new file mode 100644 index 0000000..6ff823b --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml @@ -0,0 +1,8 @@ +apiVersion: kellinwood.com/v1alpha1 +kind: S3Bucket +metadata: + name: flink-demo-bucket + namespace: default +spec: + resourceConfig: + region: us-east-2 diff --git a/aws-crossplane/resources/s3/s3-bucket-xrd.yaml b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml new file mode 100644 index 0000000..a3cb835 --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml @@ -0,0 +1,35 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xs3buckets.kellinwood.com +spec: + group: kellinwood.com + names: + kind: XS3Bucket + plural: xs3buckets + claimNames: + kind: S3Bucket + plural: s3buckets + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + properties: + resourceConfig: + properties: + region: + type: string + 
type: object
+              type: object
+            status:
+              properties:
+                bucketName:
+                  type: string
+                bucketArn:
+                  type: string
+              type: object
+          type: object
diff --git a/aws-crossplane/resources/s3/s3-object-comp.yaml b/aws-crossplane/resources/s3/s3-object-comp.yaml
new file mode 100644
index 0000000..7f9d168
--- /dev/null
+++ b/aws-crossplane/resources/s3/s3-object-comp.yaml
@@ -0,0 +1,60 @@
+---
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  name: s3objects.kellinwood.com
+spec:
+  compositeTypeRef:
+    apiVersion: kellinwood.com/v1alpha1
+    kind: XS3Object
+  mode: Pipeline
+  pipeline:
+    - step: patch-and-transform
+      functionRef:
+        name: function-patch-and-transform
+      input:
+        apiVersion: pt.fn.crossplane.io/v1beta1
+        kind: Resources
+        resources:
+          - name: s3-object
+            base:
+              apiVersion: s3.aws.upbound.io/v1beta1
+              kind: Object
+              metadata:
+                annotations:
+                  meta.upbound.io/example-id: s3/v1beta1/object
+                name: placeholder
+              spec:
+                forProvider:
+                  region: us-east-2
+                  key: placeholder
+                  bucketSelector:
+                    matchLabels:
+                      crossplane.io/claim-name: placeholder
+                providerConfigRef:
+                  name: provider-aws
+            patches:
+              - type: FromCompositeFieldPath
+                fromFieldPath: metadata.name
+                toFieldPath: metadata.name
+              - type: FromCompositeFieldPath
+                fromFieldPath: spec.resourceConfig.key
+                toFieldPath: spec.forProvider.key
+              - type: FromCompositeFieldPath
+                fromFieldPath: spec.resourceConfig.region
+                toFieldPath: spec.forProvider.region
+              - type: FromCompositeFieldPath
+                fromFieldPath: spec.resourceConfig.bucket
+                toFieldPath: spec.forProvider.bucketSelector.matchLabels['crossplane.io/claim-name']
+              - type: FromCompositeFieldPath
+                fromFieldPath: spec.resourceConfig.content
+                toFieldPath: spec.forProvider.content
+              - type: FromCompositeFieldPath
+                fromFieldPath: spec.resourceConfig.contentBase64
+                toFieldPath: spec.forProvider.contentBase64
+              - type: ToCompositeFieldPath
+                fromFieldPath: status.atProvider.arn
+                toFieldPath: status.objectArn
+              - type: ToCompositeFieldPath
+                fromFieldPath: status.atProvider.key
+                toFieldPath: status.objectName
diff --git a/aws-crossplane/resources/s3/s3-object-example-claim.yaml b/aws-crossplane/resources/s3/s3-object-example-claim.yaml
new file mode 100644
index 0000000..b406b83
--- /dev/null
+++ b/aws-crossplane/resources/s3/s3-object-example-claim.yaml
@@ -0,0 +1,20 @@
+apiVersion: kellinwood.com/v1alpha1
+kind: S3Object
+metadata:
+  name: index.zip
+  namespace: default
+spec:
+  resourceConfig:
+    region: us-east-2
+    bucket: flink-demo-bucket
+    key: index.zip
+# The content of the index.zip is a single file, index.js, containing:
+# exports.handler = async (event) => {
+#   console.log(event);
+#   const response = {
+#     statusCode: 200,
+#     event: event
+#   };
+#   return response;
+# };
+    contentBase64: UEsDBBQAAAAIACZUdln/4COOcwAAANEAAAAIABwAaW5kZXguanNVVAkAA4fqQGeK6kBndXgLAAEE9gEAAAQUAAAAU1AAgdSKgvyikmK9jMS8lJzUIgVbhcTiyrxkBY3UstS8Ek0FWzuFai4FOEjOzyvOz0nVy8lPh6qwRpMtUShKLS4AMlKBZiFrBYHiksSS0mLn/JRUKwUjAwMdNGmwiVYQCkmqFtmOotSS0qI8uCUwKaAaAFBLAQIeAxQAAAAIACZUdln/4COOcwAAANEAAAAIABgAAAAAAAEAAACkgQAAAABpbmRleC5qc1VUBQADh+pAZ3V4CwABBPYBAAAEFAAAAFBLBQYAAAAAAQABAE4AAAC1AAAAAAA=
diff --git a/aws-crossplane/resources/s3/s3-object-xrd.yaml b/aws-crossplane/resources/s3/s3-object-xrd.yaml
new file mode 100644
index 0000000..3314b4f
--- /dev/null
+++ b/aws-crossplane/resources/s3/s3-object-xrd.yaml
@@ -0,0 +1,43 @@
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xs3objects.kellinwood.com
+spec:
+  group: kellinwood.com
+  names:
kind: XS3Object + plural: xs3objects + claimNames: + kind: S3Object + plural: s3objects + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + properties: + resourceConfig: + properties: + region: + type: string + bucket: + type: string + key: + type: string + content: + type: string + contentBase64: + type: string + type: object + type: object + status: + properties: + objectName: + type: string + objectArn: + type: string + type: object + type: object diff --git a/aws-crossplane/start-flink-lambda/.gitignore b/aws-crossplane/start-flink-lambda/.gitignore new file mode 100644 index 0000000..5815224 --- /dev/null +++ b/aws-crossplane/start-flink-lambda/.gitignore @@ -0,0 +1,4 @@ +package +venv.demo +.DS_Store +*.zip diff --git a/aws-crossplane/start-flink-lambda/README.md b/aws-crossplane/start-flink-lambda/README.md new file mode 100644 index 0000000..91f12d5 --- /dev/null +++ b/aws-crossplane/start-flink-lambda/README.md @@ -0,0 +1,10 @@ +# Instructions to build the lambda handler Zip file +These instructions result in the creation of `start_flink_py.zip` +``` +cd aws-crossplane/demo/ +mkdir package +python3 -m venv venv.demo +source venv.demo/bin/activate +pip install --target ./package boto3 +./build_start_flink_py_zip.sh +``` diff --git a/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh b/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh new file mode 100755 index 0000000..1ca80f2 --- /dev/null +++ b/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh @@ -0,0 +1,11 @@ +#! /bin/bash + +set -e + +cd $(dirname $0) +rm -rf start_flink_py.zip +cd package +zip -r ../start_flink_py.zip . +cd .. +zip start_flink_py.zip start_flink.py + diff --git a/aws-crossplane/start-flink-lambda/start_flink.py b/aws-crossplane/start-flink-lambda/start_flink.py new file mode 100644 index 0000000..c80a3c9 --- /dev/null +++ b/aws-crossplane/start-flink-lambda/start_flink.py @@ -0,0 +1,48 @@ +import logging +import boto3 + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +def lambda_handler(event, context): + + if 'detail' in event: + event_detail = event['detail'] + else: + logger.info('Ignoring - missing .detail in : {}'.format(event)) + return + + event_name = 'UnknownEventName' + if 'eventName' in event_detail: + event_name = event_detail['eventName'] + + logger.info('Incoming event {}: {}'.format(event_name, event)) + + # Ignore events other than Create or Update, + if event_name not in ['CreateApplication', 'UpdateApplication']: + logger.info('Ignoring - eventName={}'.format(event_name)) + return + + try: + region = event['region'] + application_name = event_detail['responseElements']['applicationDetail']['applicationName'] + + # kinesisanalyticsv2 API reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesisanalyticsv2.html + client_kda = boto3.client('kinesisanalyticsv2', region_name=region) + + describe_response = client_kda.describe_application(ApplicationName=application_name) + logger.info(f'describe_application response: {describe_response}') + + # get application status. + application_status = describe_response['ApplicationDetail']['ApplicationStatus'] + + # an application can be started from 'READY' status only. 
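+        # Any other ApplicationStatus (e.g. STARTING, RUNNING, UPDATING, STOPPING,
+        # DELETING) is treated as a no-op by the check below.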
+ if application_status != 'READY': + logger.info('No-op for Application {} because ApplicationStatus {} is filtered'.format(application_name, application_status)) + return + + # this call doesn't wait for an application to transfer to 'RUNNING' state. + client_kda.start_application(ApplicationName=application_name) + logger.info('Started Application: {}'.format(application_name)) + except Exception as err: + logger.error(err) From 885da7da47e1baa93d79db99bb308525976c2d02 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 4 Dec 2024 14:45:44 -1000 Subject: [PATCH 06/28] Remove creds --- .gitignore | 1 + aws-crossplane/local/aws/manifests/credentials.yaml | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 16409d5..dc2682a 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ dependency-reduced-pom.xml *~ .*~ .cwlogs +.DS_Store \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/credentials.yaml b/aws-crossplane/local/aws/manifests/credentials.yaml index 9cc8785..c867f9e 100644 --- a/aws-crossplane/local/aws/manifests/credentials.yaml +++ b/aws-crossplane/local/aws/manifests/credentials.yaml @@ -3,9 +3,9 @@ apiVersion: v1 stringData: creds: | [default] - aws_access_key_id=ASIAXQQ7KQFETOZQOKBW - aws_secret_access_key=4D5+go8Xu/ECTkhEvo1ElSQzsWGsPQhqHdtfErR0 - aws_session_token=IQoJb3JpZ2luX2VjEE8aCXVzLWVhc3QtMiJIMEYCIQDMk2rlsyaegjXW0veslAkZZLnLQF7D1QTdgZ9y6keuGwIhAJDRpIhgiFFmzJJgZt02YcQoJreR/u9M0H4XfG1VtlvEKqoDCPj//////////wEQABoMNTE2NTM1NTE3NTEzIgzdOexLddWNYoPpRpgq/gJIm5MoOmHKO1tLHDX12/sTDb/P4PVxq/7Weq75DQE7N7w4HHMGtTWv1TJmkkYqiNSs0m2l62XQq2zDMo3HB+QXTXnTmXeO/0d1fZtcmwn6mOUigs3G+Sa607St31bDKfBS6MqnCNCtQqdMHa612zjwivZ5b8cayFiHEimyiyeLli0XOYQheve3rvMHp6ZMU+DONHXNLO9WheMtdgwRGjRAW7TCxI/oXvIZnjLKoTX810sWv+xmJBF7a95ED/ScX5Y/PYwfLhX684KE0my+odSFz3MtJj6nEQgY2KGXKwu9apfRje6J0f1FHb5HmfIcxL45JEkY+o9NYY7vR8xsdSZPndgNXUVgR+cXnaV19ckFAkGYW0HauK15uKGDxkPtWTsc3mmGaQSsPJMZwK8hOja7OG8+HdemNCjzbRmZYsUdua3ZkQOdpItjxq1B6yt56VXlWrQIOa9C5C6oOuxyUhqIs5rT912+ZQbRWSEKG7Ac+cE97MTgp/4jHeTGnb8AMNm7w7oGOqUBPMiT81S3m87M3JAaX0BdHmlagCXqxpnvcgHHZklR99yhexuEEYb8E/TUFNvb1l0X0Nn991XSS+LHpjdmysDB5dYF0UWNjxogQuj5eTnF6rNX67f5srcMBQDyWgFbf1YU33eDMrR1g6AzcSYuGKH+aCsGg/RdcxPGTw7cKi3BQ6Pd3penTmk5qPAJDUVl/ZYWeVR5XhViIiGrKI6TV6kzZX0mQcky + aws_access_key_id=REPLACE + aws_secret_access_key=REPLACE + aws_session_token=REPLACE kind: Secret metadata: creationTimestamp: null From 8d4f2c2c32fd58994d9c76485dc1d73e84a45d3e Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 4 Dec 2024 15:03:22 -1000 Subject: [PATCH 07/28] Fix broken formatting in README.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 892fe73..9ddf3c5 100644 --- a/README.md +++ b/README.md @@ -321,7 +321,6 @@ Wait until the Flink application is in the 'Running' state, then execute the fol ./poc-get-events.sh ``` -``` #### Cleanup ``` From 8f3cf756e1101d7826b5bab2ce3d62e14088839d Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 4 Dec 2024 20:27:10 -1000 Subject: [PATCH 08/28] Fix issues in README --- README.md | 2 +- aws-crossplane/start-flink-lambda/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9ddf3c5..be552b9 100644 --- a/README.md +++ b/README.md @@ -278,7 +278,7 @@ for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do k apply -f At the time of this writing the demo does not utilize a custom composition function. 
Instead, it uses the off-the-shelf function `function-patch-and-transform` which gets loaded during IDP creation, above. -##### Provision AWS MAnaged Flink via Crossplane claims +##### Provision AWS Managed Flink via Crossplane claims Provision the S3 bucket and Kinesis streams... ``` diff --git a/aws-crossplane/start-flink-lambda/README.md b/aws-crossplane/start-flink-lambda/README.md index 91f12d5..1b0cd99 100644 --- a/aws-crossplane/start-flink-lambda/README.md +++ b/aws-crossplane/start-flink-lambda/README.md @@ -1,7 +1,7 @@ # Instructions to build the lambda handler Zip file These instructions result in the creation of `start_flink_py.zip` ``` -cd aws-crossplane/demo/ +cd aws-crossplane/start-flink-lambda/ mkdir package python3 -m venv venv.demo source venv.demo/bin/activate From 7b4bb91087a3a9eafe9b605cb7fc7fbbe61c7dde Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 11 Dec 2024 15:00:29 -1000 Subject: [PATCH 09/28] Provision via Terraform and other minor changes --- .gitignore | 5 +- README.md | 30 +++ aws-crossplane/port-forward-idp-localstack.sh | 6 + .../flink/flink-basic-example-claim.yaml | 4 +- aws-terraform/main.tf | 238 ++++++++++++++++++ 5 files changed, 280 insertions(+), 3 deletions(-) create mode 100644 aws-crossplane/port-forward-idp-localstack.sh create mode 100644 aws-terraform/main.tf diff --git a/.gitignore b/.gitignore index dc2682a..a3389f7 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,7 @@ dependency-reduced-pom.xml *~ .*~ .cwlogs -.DS_Store \ No newline at end of file +.DS_Store +venv* +.terraform* +terraform.tfstate* \ No newline at end of file diff --git a/README.md b/README.md index be552b9..e2ed389 100644 --- a/README.md +++ b/README.md @@ -195,6 +195,36 @@ left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` d ./poc-get-events.sh ``` +### Provisioning via Terraform + +Requires installing the [Terraform CLI](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) + +Steps + +```shell +cd aws-terraform +# Set AWS variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, etc +terraform init +terraform apply +``` +While the `terraform apply` command is running, upload the application JAR file to the S3 bucket. The upload may fail if the S3 bucket has not been created by Terraform yet, but keep trying until it succeeds. + +```shell +export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account +aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \ + s3://flink-demo-bucket-${AWS_ACCOUNT_ID}/ +``` + +Follow the directions from the Crossplane section regarding sending sample events. Use the scripts in the `aws-crossplane` directory to send the sample input events, get the events written to the egress stream, and view the Flink application logging output. + +Cleanup by deleting the jar file from the S3 bucket, `flink-demo-bucket-${AWS_ACCOUNT_ID}` and running the command: + +``` +terraform destroy +``` + +The Kinesis stream `flink-demo-ingress` must be manually deleted since Flink adds a Fanout consumer to the stream, and the consumer will block deletion. + ### Provisioning via Crossplane #### Prerequisites: diff --git a/aws-crossplane/port-forward-idp-localstack.sh b/aws-crossplane/port-forward-idp-localstack.sh new file mode 100644 index 0000000..f651c8e --- /dev/null +++ b/aws-crossplane/port-forward-idp-localstack.sh @@ -0,0 +1,6 @@ +#! 
/bin/sh
+
+# This script requires the jq command: https://jqlang.github.io/jq/
+
+localstack_pod_name=$(kubectl -n localstack get pods -o json | jq -cr .items[0].metadata.name)
+kubectl -n localstack port-forward $localstack_pod_name 4566:4566
diff --git a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml
index 8b5355c..0001f98 100644
--- a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml
+++ b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml
@@ -15,8 +15,8 @@ spec:
    - propertyGroup:
      - propertyGroupId: StatefunApplicationProperties
        propertyMap:
-          EVENTS_INGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress
-          EVENTS_EGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress
+          EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress
+          EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress
          AWS_REGION: us-east-2
  compositionSelector:
    matchLabels:
diff --git a/aws-terraform/main.tf b/aws-terraform/main.tf
new file mode 100644
index 0000000..1622632
--- /dev/null
+++ b/aws-terraform/main.tf
@@ -0,0 +1,238 @@
+provider "aws" {
+  region = "us-east-2"
+}
+
+# Caller identity allows referencing the account ID w/o having to hard-code it in the bucket name
+data "aws_caller_identity" "current" {}
+# Same for aws_region so we don't have to hard-code the Flink environment variables.
+data "aws_region" "current" {}
+
+resource "aws_s3_bucket" "flink_demo_bucket" {
+  # Bucket names must be globally unique, so I'm appending the account ID to workaround BucketAlreadyExists
+  bucket = "flink-demo-bucket-${data.aws_caller_identity.current.account_id}"
+}
+
+resource "aws_s3_bucket_ownership_controls" "flink_demo_bucket_ownership_controls" {
+  bucket = aws_s3_bucket.flink_demo_bucket.id
+  rule {
+    object_ownership = "BucketOwnerPreferred"
+  }
+}
+
+resource "aws_s3_bucket_acl" "flink_demo_bucket_acl" {
+  depends_on = [aws_s3_bucket_ownership_controls.flink_demo_bucket_ownership_controls]
+
+  bucket = aws_s3_bucket.flink_demo_bucket.id
+  acl    = "private"
+}
+
+resource "aws_kinesis_stream" "flink_demo_ingress" {
+  name             = "flink-demo-ingress"
+  shard_count      = 1
+  retention_period = 24 # Retention period in hours
+
+  shard_level_metrics = [
+    "IncomingBytes",
+    "OutgoingBytes",
+  ]
+
+  stream_mode_details {
+    stream_mode = "PROVISIONED"
+  }
+}
+
+resource "aws_kinesis_stream" "flink_demo_egress" {
+  name             = "flink-demo-egress"
+  shard_count      = 1
+  retention_period = 24 # Retention period in hours
+
+  shard_level_metrics = [
+    "IncomingBytes",
+    "OutgoingBytes",
+  ]
+
+  stream_mode_details {
+    stream_mode = "PROVISIONED"
+  }
+}
+
+
+resource "aws_iam_role" "flink_application_role" {
+  name = "flink-application-role"
+
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [{
+      Action = "sts:AssumeRole"
+      Effect = "Allow"
+      Principal = {
+        Service = "kinesisanalytics.amazonaws.com"
+      }
+    }]
+  })
+}
+
+resource "aws_iam_role_policy_attachment" "kinesis_full_access" {
+  role       = aws_iam_role.flink_application_role.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonKinesisFullAccess"
+}
+
+resource "aws_iam_role_policy_attachment" "s3_full_access" {
+  role       = aws_iam_role.flink_application_role.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess"
+}
+
+resource "aws_iam_role_policy_attachment" "cloudwatch_full_access" {
+  role       = aws_iam_role.flink_application_role.name
+  policy_arn = "arn:aws:iam::aws:policy/CloudWatchFullAccess"
+}
+
+resource 
"aws_iam_role_policy" "flink_app_s3_policy" { + name = "flink-app-s3-policy" + role = aws_iam_role.flink_application_role.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "s3:GetObject", + "s3:ListBucket" + ] + Effect = "Allow" + Resource = [ + aws_s3_bucket.flink_demo_bucket.arn, + "${aws_s3_bucket.flink_demo_bucket.arn}/*" + ] + }] + }) +} + +resource "aws_iam_role_policy" "flink_app_kinesis_policy" { + name = "flink-app-kinesis-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = [ + "${aws_kinesis_stream.flink_demo_ingress.arn}", + "${aws_kinesis_stream.flink_demo_egress.arn}" + ] + Action = [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards" + ] + }] + }) +} + +resource "aws_iam_role_policy" "flink_app_logs_policy" { + name = "flink-app-logs-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = [ + "${aws_cloudwatch_log_group.flink_demo_log_group.arn}" + ] + Action = [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + }] + }) +} + +resource "aws_cloudwatch_log_group" "flink_demo_log_group" { + name = "flink-demo-log-group" + retention_in_days = 14 +} + +resource "aws_cloudwatch_log_stream" "flink_demo_log_stream" { + name = "flink-demo-log-stream" + log_group_name = aws_cloudwatch_log_group.flink_demo_log_group.name +} + +resource "aws_iam_role_policy" "flink_app_metrics_policy" { + name = "flink-app-metrics-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = "*" + Action = [ + "cloudwatch:PutMetricData" + ] + }] + }) +} + +# Reference: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesisanalyticsv2_application +resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { + name = "flink_demo_tf" + runtime_environment = "FLINK-1_18" + service_execution_role = aws_iam_role.flink_application_role.arn + application_mode = "STREAMING" + start_application = true + + application_configuration { + application_code_configuration { + code_content { + s3_content_location { + bucket_arn = aws_s3_bucket.flink_demo_bucket.arn + file_key = "my-stateful-functions-embedded-java-3.3.0.jar" + } + } + code_content_type = "ZIPFILE" + } + + environment_properties { + property_group { + property_group_id = "StatefunApplicationProperties" + + property_map = { + EVENTS_INGRESS_STREAM_DEFAULT = "${aws_kinesis_stream.flink_demo_ingress.name}" + EVENTS_EGRESS_STREAM_DEFAULT = "${aws_kinesis_stream.flink_demo_egress.name}" + AWS_REGION = data.aws_region.current.name + } + } + } + + flink_application_configuration { + checkpoint_configuration { + configuration_type = "CUSTOM" + checkpoint_interval = 300000 # 5 mins * 60 secs/min * 1000 millis/sec + checkpointing_enabled = true + } + monitoring_configuration { + configuration_type = "CUSTOM" + log_level = "INFO" + metrics_level = "TASK" + } + parallelism_configuration { + auto_scaling_enabled = false + configuration_type = "CUSTOM" + parallelism = 1 + parallelism_per_kpu = 1 + } + } + + run_configuration { + application_restore_configuration { + application_restore_type = "RESTORE_FROM_LATEST_SNAPSHOT" + # snapshot_name = "xyz" + } + flink_run_configuration { + allow_non_restored_state = false + } + } + } + 
cloudwatch_logging_options { + log_stream_arn = aws_cloudwatch_log_stream.flink_demo_log_stream.arn + } +} From b62cb743bc54c331b595db3bc51c77083cd21eb9 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 11 Dec 2024 15:26:35 -1000 Subject: [PATCH 10/28] README updates --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index e2ed389..be43db8 100644 --- a/README.md +++ b/README.md @@ -203,11 +203,11 @@ Steps ```shell cd aws-terraform -# Set AWS variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, etc +# Configure your AWS profile, set AWS env vars, or run 'aws configure sso', etc terraform init -terraform apply +terraform apply # When prompted, enter 'yes' ``` -While the `terraform apply` command is running, upload the application JAR file to the S3 bucket. The upload may fail if the S3 bucket has not been created by Terraform yet, but keep trying until it succeeds. +Immediately after entering 'yes' to the prompt issued by `terraform apply`, switch to another shell/terminal tab and upload the application JAR file to the S3 bucket. The upload may fail if the S3 bucket has not been created by Terraform yet, so keep trying until it succeeds. ```shell export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account @@ -215,12 +215,12 @@ aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \ s3://flink-demo-bucket-${AWS_ACCOUNT_ID}/ ``` -Follow the directions from the Crossplane section regarding sending sample events. Use the scripts in the `aws-crossplane` directory to send the sample input events, get the events written to the egress stream, and view the Flink application logging output. +Follow the directions near the end of the Crossplane section, below, regarding sending sample events. Use the scripts in the `aws-crossplane` directory to send the sample input events, get the events written to the egress stream, and view the Flink application logging output. Cleanup by deleting the jar file from the S3 bucket, `flink-demo-bucket-${AWS_ACCOUNT_ID}` and running the command: -``` -terraform destroy +```shell +terraform destroy # When prompted, enter 'yes' ``` The Kinesis stream `flink-demo-ingress` must be manually deleted since Flink adds a Fanout consumer to the stream, and the consumer will block deletion. 
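The blocking consumer can be inspected, and the stream force-deleted, from the CLI. A minimal sketch, assuming the stream name `flink-demo-ingress` from the Terraform configuration above (the `list-stream-consumers` call is illustrative, not part of the demo scripts):

```shell
# Look up the stream ARN, then list the fan-out consumer Flink registered on it
stream_arn=$(aws kinesis describe-stream-summary --stream-name flink-demo-ingress \
  --query 'StreamDescriptionSummary.StreamARN' --output text)
aws kinesis list-stream-consumers --stream-arn "$stream_arn"

# Delete the stream together with any registered consumers
aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-demo-ingress
```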
From 4f55f902a6ba3f5c097afb7dc8bdf83fc1ed4397 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Wed, 11 Dec 2024 15:51:29 -1000 Subject: [PATCH 11/28] Enable snapshots --- aws-terraform/main.tf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws-terraform/main.tf b/aws-terraform/main.tf index 1622632..440e405 100644 --- a/aws-terraform/main.tf +++ b/aws-terraform/main.tf @@ -191,6 +191,10 @@ resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { code_content_type = "ZIPFILE" } + application_snapshot_configuration { + snapshots_enabled = true + } + environment_properties { property_group { property_group_id = "StatefunApplicationProperties" From 2fa3724693d218ea3d886636e064fd7ab6eacf38 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 12 Dec 2024 08:37:36 -1000 Subject: [PATCH 12/28] Drop "2" from demo name, dont set startApplication by default --- README.md | 11 ++++++----- aws-crossplane/NOTES.md | 12 ++++++------ aws-crossplane/claims/managed-flink-claim.yaml | 10 ++++------ aws-crossplane/poc-tail-logs.sh | 4 ++-- aws-crossplane/resources/flink/flink-basic-comp.yaml | 4 +++- .../resources/flink/flink-basic-example-claim.yaml | 1 + .../resources/flink/flink-lambda-comp.yaml | 5 ++--- .../resources/flink/flink-lambda-example-claim.yaml | 4 ++-- aws-crossplane/resources/flink/flink-xrd.yaml | 2 ++ 9 files changed, 28 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index be43db8..ae58306 100644 --- a/README.md +++ b/README.md @@ -266,7 +266,7 @@ The lambda will be provisioned along with AWS Managed Flink via a single claim, ##### Create the CloudWatch log group for the lambda Login to AWS Identity Center and launch the web console for the Sandbox account. -Confirm the existence of, and create if necessary, the CloudWatch log group `/aws/lambda/flink-demo2-starter`. I can't +Confirm the existence of, and create if necessary, the CloudWatch log group `/aws/lambda/flink-demo-starter`. I can't figure out how to do this using the managed resource provided by `provider-aws-cloudwatchlogs` because the log group for the lambda must be named exactly that, the MR doesn't provide a way to set the name explicitly, and k8s/crossplane doesn't like the slashes in `metadata.name`. @@ -327,10 +327,11 @@ Visit the S3 services page. Find the S3 bucket (flink-demo-bucket-*) and upload - `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code) - `start-flink-lambda/start_flink_py.zip` (Lambda handler code which transitions the Managed Flink instance to the 'Running' state) -Alternatively, use the AWS CLI to upload the files to the bucket (replace `XXXXX` with the bucket's unique suffix)... +Alternatively, use the AWS CLI to upload the files... ``` -aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://flink-demo-bucket-XXXXX/my-stateful-functions-embedded-java-3.3.0.jar -aws s3 cp start-flink-lambda/start_flink_py.zip s3://flink-demo-bucket-XXXXX/start_flink_py.zip +flink_bucket_name=$(kubectl get managed | grep bucket | awk '{print $4}') +aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/my-stateful-functions-embedded-java-3.3.0.jar +aws s3 cp start-flink-lambda/start_flink_py.zip s3://${flink_bucket_name}/start_flink_py.zip ``` ##### Provision the Managed Flink application @@ -365,5 +366,5 @@ Shut down the local IDP with the command: idpbuilder delete ``` -Manually remove the CloudWatch log group `/aws/lambda/flink-demo2-starter`. 
+Manually remove the CloudWatch log group `/aws/lambda/flink-demo-starter`.

diff --git a/aws-crossplane/NOTES.md b/aws-crossplane/NOTES.md
index 779558f..e90db68 100644
--- a/aws-crossplane/NOTES.md
+++ b/aws-crossplane/NOTES.md
@@ -1,14 +1,14 @@
 I initially put the stream ARN values in the environment section of [the managed flink claim](./claims/managed-flink-claim.yaml).
 Just the plain stream names are required; however, after updating the values in the claim and applying the change, I see this
-error in the output of `kubectl describe application.kinesisanalyticsv2.aws.upbound.io/flink-demo2-application`...
+error in the output of `kubectl describe application.kinesisanalyticsv2.aws.upbound.io/flink-demo-application`...
 
 ```
 Warning CannotUpdateExternalResource 4m19s (x14 over 6m31s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1,
 kind=application (combined from similar events): async update failed: failed to update the resource: [{0 updating Kinesis Analytics v2 Application
-(arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application): operation error Kinesis Analytics V2:
+(arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application): operation error Kinesis Analytics V2:
 UpdateApplication, https response error StatusCode: 400, RequestID: 39586af4-c1cc-4515-b818-c86f8f176671,
-InvalidApplicationConfigurationException: Failed to take snapshot for the application flink-demo2-application at this moment.
+InvalidApplicationConfigurationException: Failed to take snapshot for the application flink-demo-application at this moment.
 The application is currently experiencing downtime. Please check the application's CloudWatch metrics or CloudWatch logs
 for any possible errors and retry the request. You can also retry the request after disabling the snapshots in the Kinesis
 Data Analytics console or by updating the ApplicationSnapshotConfiguration through the AWS SDK. []}]
@@ -23,9 +23,9 @@ showed in the AWS console, but the new app got stuck on this:
 ```
 Warning CannotCreateExternalResource 51s (x39 over 4m41s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1,
 kind=application (combined from similar events): async create failed: failed to create the resource: [{0 creating Kinesis Analytics v2 Application
-(flink-demo2-application): operation error Kinesis Analytics V2: CreateApplication, https response error StatusCode: 400,
+(flink-demo-application): operation error Kinesis Analytics V2: CreateApplication, https response error StatusCode: 400,
 RequestID: 64366786-9f40-440f-8fcd-c3376f0cc619, ConcurrentModificationException: Tags are already registered for this
-resource ARN: arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application, please retry later.
+resource ARN: arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application, please retry later.
 Or you can create without tags and then add tags using TagResource API after successful resource creation. []}]
 ```
 
@@ -39,7 +39,7 @@ But in the AWS Console, the app seems stuck with the 'Updating' status. 
OK, wai ``` { - "applicationARN": "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application", + "applicationARN": "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application", "applicationVersionId": "3", "locationInformation": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException(MultiObjectDeleteSupport.java:107)", "logger": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport", diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml index 3f386d4..a55a867 100644 --- a/aws-crossplane/claims/managed-flink-claim.yaml +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -1,15 +1,16 @@ apiVersion: kellinwood.com/v1alpha1 kind: ManagedFlink metadata: - name: flink-demo2 + name: flink-demo namespace: default spec: resourceConfig: region: us-east-2 - name: flink-demo2 + name: flink-demo codeBucket: flink-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 + startApplication: true parallelism: 1 environmentProperties: - propertyGroup: @@ -18,9 +19,6 @@ spec: EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress AWS_REGION: us-east-2 - FOO: bar compositionSelector: matchLabels: - appReadyHandler: lambda - - + appReadyHandler: none \ No newline at end of file diff --git a/aws-crossplane/poc-tail-logs.sh b/aws-crossplane/poc-tail-logs.sh index 0f120bf..faa7ed0 100755 --- a/aws-crossplane/poc-tail-logs.sh +++ b/aws-crossplane/poc-tail-logs.sh @@ -20,8 +20,8 @@ while true; do aws logs get-log-events \ --start-from-head \ $NEXT_TOKEN_ARG \ - --log-group-name flink-demo2-app-log-group \ - --log-stream-name flink-demo2-app-log-stream \ + --log-group-name flink-demo-app-log-group \ + --log-stream-name flink-demo-app-log-stream \ >$CWLOG_FILE NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) diff --git a/aws-crossplane/resources/flink/flink-basic-comp.yaml b/aws-crossplane/resources/flink/flink-basic-comp.yaml index 215d879..d390630 100644 --- a/aws-crossplane/resources/flink/flink-basic-comp.yaml +++ b/aws-crossplane/resources/flink/flink-basic-comp.yaml @@ -71,7 +71,6 @@ spec: matchControllerRef: true region: us-east-2 runtimeEnvironment: FLINK-1_18 - startApplication: true serviceExecutionRoleSelector: matchLabels: rolePurpose: flink-application @@ -95,6 +94,9 @@ spec: - type: FromCompositeFieldPath fromFieldPath: spec.resourceConfig.runtime toFieldPath: spec.forProvider.runtimeEnvironment + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.startApplication + toFieldPath: spec.forProvider.startApplication - type: FromCompositeFieldPath fromFieldPath: spec.resourceConfig.parallelism toFieldPath: spec.forProvider.applicationConfiguration[0].flinkApplicationConfiguration[0].parallelismConfiguration[0].parallelism diff --git a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml index 0001f98..84d4e09 100644 --- a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml @@ -11,6 +11,7 @@ spec: codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 parallelism: 1 +# startApplication: true environmentProperties: - propertyGroup: - propertyGroupId: StatefunApplicationProperties diff --git a/aws-crossplane/resources/flink/flink-lambda-comp.yaml b/aws-crossplane/resources/flink/flink-lambda-comp.yaml index 
c0f3f9f..aea4e58 100644 --- a/aws-crossplane/resources/flink/flink-lambda-comp.yaml +++ b/aws-crossplane/resources/flink/flink-lambda-comp.yaml @@ -71,7 +71,6 @@ spec: matchControllerRef: true region: us-east-2 runtimeEnvironment: FLINK-1_18 - startApplication: true serviceExecutionRoleSelector: matchLabels: rolePurpose: flink-application @@ -168,7 +167,7 @@ spec: { "Effect": "Allow", "Resource": [ - "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo2-app-log-group" + "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo-app-log-group" ], "Action": [ "logs:DescribeLogGroups", @@ -326,7 +325,7 @@ spec: { "Effect": "Allow", "Resource": [ - "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo2-application" + "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application" ], "Action": [ "kinesisanalytics:DescribeApplication", diff --git a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml index dbc45ae..8263874 100644 --- a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml @@ -1,12 +1,12 @@ apiVersion: kellinwood.com/v1alpha1 kind: ManagedFlink metadata: - name: flink-demo2 + name: flink-demo namespace: default spec: resourceConfig: region: us-east-2 - name: flink-demo2 + name: flink-demo codeBucket: flink-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml index 0e2fe53..f39594d 100644 --- a/aws-crossplane/resources/flink/flink-xrd.yaml +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -33,6 +33,8 @@ spec: type: string parallelism: type: number + startApplication: + type: boolean environmentProperties: type: array items: From 7f2ba306c318b0a4bfa4aee8cc8d27abad8d2a61 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 12 Dec 2024 08:40:50 -1000 Subject: [PATCH 13/28] Don't include startApplication by default --- aws-crossplane/claims/managed-flink-claim.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml index a55a867..910e893 100644 --- a/aws-crossplane/claims/managed-flink-claim.yaml +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -10,7 +10,7 @@ spec: codeBucket: flink-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 - startApplication: true +# startApplication: true parallelism: 1 environmentProperties: - propertyGroup: From 88fa32535cedf4a1767a19b30ffaf05a6c5a4050 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 12 Dec 2024 09:51:13 -1000 Subject: [PATCH 14/28] Update README w/ latest info, +tweaks --- README.md | 28 +++++++++++++------ .../claims/managed-flink-claim.yaml | 4 +-- .../flink/flink-lambda-example-claim.yaml | 5 ++-- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index ae58306..0e596e6 100644 --- a/README.md +++ b/README.md @@ -251,17 +251,25 @@ The files to run the crossplane demo are in the [aws-crossplane](./aws-crossplan ##### Build the lambda handler package. What? A lambda? 
-The [managed resource for creating AWS Managed Flink applications](https://marketplace.upbound.io/providers/upbound/provider-aws-kinesisanalyticsv2/v1.17.0/resources/kinesisanalyticsv2.aws.upbound.io/Application/v1beta1) -will do most of the work to get the Flink application provisioned, but if nothing else is done the application will -become 'Ready', and not 'Running'. Additional resources are required to auto-run the Flink app... namely a lambda that will +> IMPORTANT: Using the lambda is optional and not recommended. + +> At one point it appeared that the managed resource for Flink wouldn't start the Flink application, and that like the CloudWatch +approach, a lambda is needed to handle events from the AWS resource and transition the application to the `Running` state. This +is not actually the case, but support for the lambda is still included. To use the lambda, it must be packaged and uploaded to the +S3 bucket, and the managed flink claim must select the 'lambda' composition. + +> When using the [flink-lambda](./aws-crossplane/resources/flink/flink-lambda-comp.yaml) composition, +the [managed resource for creating AWS Managed Flink applications](https://marketplace.upbound.io/providers/upbound/provider-aws-kinesisanalyticsv2/v1.17.0/resources/kinesisanalyticsv2.aws.upbound.io/Application/v1beta1) +will do most of the work to get the Flink application provisioned, and the application will +become 'Ready' (not 'Running'). In this case, the lambda will invoke an API call to start the application. This is in following with how it works when provisioning via CloudFormation. In CloudFormation though, the Lambda code can be inlined in a CloudFormation template, but in Crossplane the Lambda code must be referenced separately, e.g., via reference to the lambda package in an S3 file. -Build the lambda package by following [the instructions here](./aws-crossplane/start-flink-lambda/README.md). The resulting Zip file will be +> Build the lambda package by following [the instructions here](./aws-crossplane/start-flink-lambda/README.md). The resulting Zip file will be uploaded to S3 later, as you follow the steps below. -The lambda will be provisioned along with AWS Managed Flink via a single claim, below. +> The lambda will be provisioned along with AWS Managed Flink via a single claim, below. ##### Create the CloudWatch log group for the lambda Login to AWS Identity Center and launch the web console for the Sandbox account. @@ -325,23 +333,27 @@ Return to AWS Identity Center and launch the web console for the Sandbox account Visit the S3 services page. Find the S3 bucket (flink-demo-bucket-*) and upload the following files to the bucket - `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code) -- `start-flink-lambda/start_flink_py.zip` (Lambda handler code which transitions the Managed Flink instance to the 'Running' state) +- `start-flink-lambda/start_flink_py.zip` (Optional, lambda handler code which transitions the Managed Flink instance to the 'Running' state) Alternatively, use the AWS CLI to upload the files... 
```
+flink_bucket_name=$(kubectl get managed | grep bucket | awk '{print $4}')
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/my-stateful-functions-embedded-java-3.3.0.jar
+aws s3 cp start-flink-lambda/start_flink_py.zip s3://${flink_bucket_name}/start_flink_py.zip
```

##### Provision the Managed Flink application

-Applying the following claim will trigger the creation of the Flink application, its role, and log groups. Note that the Flink application will become 'Ready' but will not run on its own. Additional resources are required to auto-run the Flink app... a lambda for handling EventBridge events from the Flink application, an EventBridge rule and trigger to invoke the lambda, an IAM role allowing the lambda to make API calls to observe and control the Flink app, plus a permission for the EventBridge rule to invoke the lambda as a target. When the lambda sees that the Flink application is in the Ready state, it will invoke an API call to start the application.
+Applying the following claim will trigger the creation of the Flink application, its role, and log groups. Note that by default Flink application will become 'Ready' since `startApplication: true` is commented-out in the claim.

```
kubectl apply -f claims/managed-flink-claim.yaml
```

+Visit the AWS Managed Flink applications page in the web console. When the application status is `Ready`, then uncomment the `startApplication: true` line in the `managed-flink-claim.yaml` file and re-run the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few minutes, and so it switches back and forth between `Running` and `Updating` :(
+
+> If using the `flink-lambda` composition, no further action should be required to transition the Flink application to the `Running` state.
+
Wait until the Flink application is in the 'Running' state, then execute the following commands to send events and see the results:

```
diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml
index 910e893..e4d033b 100644
--- a/aws-crossplane/claims/managed-flink-claim.yaml
+++ b/aws-crossplane/claims/managed-flink-claim.yaml
@@ -10,7 +10,7 @@ spec:
    codeBucket: flink-demo-bucket
    codeFile: my-stateful-functions-embedded-java-3.3.0.jar
    runtime: FLINK-1_18
-# startApplication: true
+    # startApplication: true
    parallelism: 1
    environmentProperties:
      - propertyGroup:
@@ -21,4 +21,4 @@ spec:
    AWS_REGION: us-east-2
  compositionSelector:
    matchLabels:
-    appReadyHandler: none
\ No newline at end of file
+    appReadyHandler: none # or maybe 'lambda'. 
See ../../README.md#build-the-lambda-handler-package--what-a-lambda \ No newline at end of file diff --git a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml index 8263874..b088589 100644 --- a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml @@ -15,10 +15,9 @@ spec: - propertyGroup: - propertyGroupId: StatefunApplicationProperties propertyMap: - EVENTS_INGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress - EVENTS_EGRESS_STREAM_DEFAULT: arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress + EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress AWS_REGION: us-east-2 - FOO: bar compositionSelector: matchLabels: appReadyHandler: lambda From f60ffb360c57aaca31be839132e2fa8a990db7c6 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 12 Dec 2024 15:26:03 -1000 Subject: [PATCH 15/28] More README fixes --- README.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0e596e6..3196aca 100644 --- a/README.md +++ b/README.md @@ -249,9 +249,11 @@ provisioned via a separate claim. Also, see my note below regarding the creatio The files to run the crossplane demo are in the [aws-crossplane](./aws-crossplane) directory. +Skip the instructions for using a lambda to start the Flink application. Go to [here instead](#start-the-local-idp-configured-to-use-aws) + ##### Build the lambda handler package. What? A lambda? -> IMPORTANT: Using the lambda is optional and not recommended. +> :warning: IMPORTANT: Using the lambda is optional and not recommended. > At one point it appeared that the managed resource for Flink wouldn't start the Flink application, and that like the CloudWatch approach, a lambda is needed to handle events from the AWS resource and transition the application to the `Running` state. This @@ -272,6 +274,9 @@ uploaded to S3 later, as you follow the steps below. > The lambda will be provisioned along with AWS Managed Flink via a single claim, below. ##### Create the CloudWatch log group for the lambda + +> :warning: OPTIONAL: Follow these instructions only if you are using the lambda to start the Flink application + Login to AWS Identity Center and launch the web console for the Sandbox account. Confirm the existence of, and create if necessary, the CloudWatch log group `/aws/lambda/flink-demo-starter`. I can't @@ -339,13 +344,17 @@ Alternatively, use the AWS CLI to upload the files... ``` flink_bucket_name=$(kubectl get managed | grep bucket | awk '{print $4}') aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/my-stateful-functions-embedded-java-3.3.0.jar -aws s3 cp start-flink-lambda/start_flink_py.zip s3://${flink_bucket_name}/start_flink_py.zip # optional + +# If using the lambda to start the Flink application, upload the lambda package +aws s3 cp start-flink-lambda/start_flink_py.zip s3://${flink_bucket_name}/start_flink_py.zip ``` ##### Provision the Managed Flink application Applying the following claim will trigger the creation of the Flink application, its role, and log groups. Note that by default Flink application will become 'Ready' since `startApplication: true` is commented-out in the claim. 
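+One way to watch for readiness from the CLI is sketched below; it assumes the claim name and namespace from
+`claims/managed-flink-claim.yaml`, and that the XRD exposes the claim under the `managedflink` resource name:
+
+```
+kubectl get managedflink flink-demo -n default
+kubectl wait --for=condition=Ready managedflink/flink-demo -n default --timeout=15m
+```
+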
+To use the lambda to start the Flink application, update the file `claims/managed-flink-claim.yaml` and change the value for `appReadyHandler` to `lambda`.
+
```
kubectl apply -f claims/managed-flink-claim.yaml
```

From 06bc543650be3dcc14f29feb036681c6dad78fc3 Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Thu, 12 Dec 2024 15:30:24 -1000
Subject: [PATCH 16/28] README updates

---
 README.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 3196aca..4a42924 100644
--- a/README.md
+++ b/README.md
@@ -359,9 +359,8 @@
kubectl apply -f claims/managed-flink-claim.yaml
```

-Visit the AWS Managed Flink applications page in the web console. When the application status is `Ready`, then uncomment the `startApplication: true` line in the `managed-flink-claim.yaml` file and re-run the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few minutes, and so it switches back and forth between `Running` and `Updating` :(
+If using the lambda to start the Flink application, no further action should be required. Otherwise, visit the AWS Managed Flink applications page in the web console. When the application status is `Ready`, then uncomment the `startApplication: true` line in the `managed-flink-claim.yaml` file and re-run the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few minutes, and so it switches back and forth between `Running` and `Updating` :(

-> If using the `flink-lambda` composition, no further action should be required to transition the Flink application to the `Running` state. 
Wait until the Flink application is in the 'Running' state, then execute the following commands to send events and see the results: From 6fd45de91026f85ec3129a86f5e14ad3036cb9e2 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 2 Jan 2025 13:30:55 -1000 Subject: [PATCH 17/28] Cleanup and various fixes --- .gitignore | 3 +- README.md | 179 +++---- ...poc-send-events.sh => demo-send-events.sh} | 10 +- aws-cloudformation/demo-tail-egress.sh | 25 + .../demo-tail-logs.sh | 10 +- ...t.yaml => flink-cf-demo-bucket-stack.yaml} | 6 +- ...link-poc.yaml => flink-cf-demo-stack.yaml} | 66 ++- aws-cloudformation/poc-get-events.sh | 10 - aws-crossplane/NOTES.md | 54 -- aws-crossplane/claims/demo-setup-claims.yaml | 10 +- .../claims/managed-flink-claim.yaml | 13 +- .../aws/cert-creation/job.yaml | 0 .../{local => cloud}/aws/crossplane.yaml | 0 .../aws/manifests/aws-services.yaml | 0 .../{local => cloud}/aws/manifests/core.yaml | 0 .../aws/manifests/credentials.yaml | 0 .../patch-and-transform-function.yaml | 0 .../aws/manifests/provider-config.yaml | 0 .../aws/manifests/secret.yaml | 0 .../{local => cloud}/aws/providers.yaml | 0 .../aws/update_credentials.sh | 0 .../localstack/cert-creation/job.yaml | 0 .../localstack/configs/function.yaml | 0 .../localstack/configs/local-secret.yaml | 0 .../configs/provider-config-localstack.yaml | 0 .../localstack/configs/providers.yaml | 0 .../localstack/configs/services.yaml | 0 .../localstack/crossplane-configs.yaml | 0 .../localstack/crossplane.yaml | 0 .../localstack/localstack.yaml | 0 .../port-forward-idp-localstack.sh | 1 + ...poc-send-events.sh => demo-send-events.sh} | 2 +- aws-crossplane/demo-tail-egress.sh | 23 + .../demo-tail-logs.sh | 11 +- aws-crossplane/poc-get-events.sh | 10 - aws-crossplane/resources/flink/README.md | 4 - .../flink/flink-basic-example-claim.yaml | 26 - ...{flink-basic-comp.yaml => flink-comp.yaml} | 14 +- ...le-claim.yaml => flink-example-claim.yaml} | 6 +- .../resources/flink/flink-lambda-comp.yaml | 463 ------------------ aws-crossplane/resources/s3/index.js | 8 - .../resources/s3/s3-object-comp.yaml | 60 --- .../resources/s3/s3-object-example-claim.yaml | 20 - .../resources/s3/s3-object-xrd.yaml | 43 -- aws-crossplane/start-flink-lambda/.gitignore | 4 - aws-crossplane/start-flink-lambda/README.md | 10 - .../build_start_flink_py_zip.sh | 11 - .../start-flink-lambda/start_flink.py | 48 -- aws-terraform/demo-send-events.sh | 21 + aws-terraform/demo-tail-egress.sh | 24 + aws-terraform/demo-tail-logs.sh | 40 ++ aws-terraform/main.tf | 20 +- docker-compose.yml | 1 + .../function/AbstractStatefulFunction.java | 2 +- .../function/cart/CartStatefulFunction.java | 15 +- .../product/ProductStatefulFunction.java | 6 + 56 files changed, 338 insertions(+), 941 deletions(-) rename aws-cloudformation/{poc-send-events.sh => demo-send-events.sh} (67%) create mode 100755 aws-cloudformation/demo-tail-egress.sh rename aws-crossplane/poc-tail-logs.sh => aws-cloudformation/demo-tail-logs.sh (84%) rename aws-cloudformation/{managed-flink-poc-bucket.yaml => flink-cf-demo-bucket-stack.yaml} (84%) rename aws-cloudformation/{managed-flink-poc.yaml => flink-cf-demo-stack.yaml} (83%) delete mode 100755 aws-cloudformation/poc-get-events.sh delete mode 100644 aws-crossplane/NOTES.md rename aws-crossplane/{local => cloud}/aws/cert-creation/job.yaml (100%) rename aws-crossplane/{local => cloud}/aws/crossplane.yaml (100%) rename aws-crossplane/{local => cloud}/aws/manifests/aws-services.yaml (100%) rename aws-crossplane/{local => cloud}/aws/manifests/core.yaml 
(100%) rename aws-crossplane/{local => cloud}/aws/manifests/credentials.yaml (100%) rename aws-crossplane/{local => cloud}/aws/manifests/patch-and-transform-function.yaml (100%) rename aws-crossplane/{local => cloud}/aws/manifests/provider-config.yaml (100%) rename aws-crossplane/{local => cloud}/aws/manifests/secret.yaml (100%) rename aws-crossplane/{local => cloud}/aws/providers.yaml (100%) rename aws-crossplane/{local => cloud}/aws/update_credentials.sh (100%) rename aws-crossplane/{local => cloud}/localstack/cert-creation/job.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/configs/function.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/configs/local-secret.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/configs/provider-config-localstack.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/configs/providers.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/configs/services.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/crossplane-configs.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/crossplane.yaml (100%) rename aws-crossplane/{local => cloud}/localstack/localstack.yaml (100%) rename aws-crossplane/{ => cloud/localstack}/port-forward-idp-localstack.sh (74%) rename aws-crossplane/{poc-send-events.sh => demo-send-events.sh} (94%) create mode 100755 aws-crossplane/demo-tail-egress.sh rename aws-cloudformation/poc-tail-logs.sh => aws-crossplane/demo-tail-logs.sh (78%) delete mode 100755 aws-crossplane/poc-get-events.sh delete mode 100644 aws-crossplane/resources/flink/README.md delete mode 100644 aws-crossplane/resources/flink/flink-basic-example-claim.yaml rename aws-crossplane/resources/flink/{flink-basic-comp.yaml => flink-comp.yaml} (96%) rename aws-crossplane/resources/flink/{flink-lambda-example-claim.yaml => flink-example-claim.yaml} (89%) delete mode 100644 aws-crossplane/resources/flink/flink-lambda-comp.yaml delete mode 100644 aws-crossplane/resources/s3/index.js delete mode 100644 aws-crossplane/resources/s3/s3-object-comp.yaml delete mode 100644 aws-crossplane/resources/s3/s3-object-example-claim.yaml delete mode 100644 aws-crossplane/resources/s3/s3-object-xrd.yaml delete mode 100644 aws-crossplane/start-flink-lambda/.gitignore delete mode 100644 aws-crossplane/start-flink-lambda/README.md delete mode 100755 aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh delete mode 100644 aws-crossplane/start-flink-lambda/start_flink.py create mode 100755 aws-terraform/demo-send-events.sh create mode 100755 aws-terraform/demo-tail-egress.sh create mode 100755 aws-terraform/demo-tail-logs.sh diff --git a/.gitignore b/.gitignore index a3389f7..99876e5 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,8 @@ dependency-reduced-pom.xml *~ .*~ .cwlogs +.next-shard-iterator .DS_Store venv* .terraform* -terraform.tfstate* \ No newline at end of file +terraform.tfstate* diff --git a/README.md b/README.md index 4a42924..aac75c2 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ branch, and build/install it locally via `mvn install` mvn package ``` -The demo can be provisioned in AWS in two ways... via CloudFormation or Crossplane +The demo can be provisioned in AWS in three ways... 
via CloudFormation, Terraform, or Crossplane ### Provisioning via AWS CloudFormation @@ -161,39 +161,46 @@ cd aws-cloudformation #### Create an S3 bucket and upload this project's JAR file -To create the bucket, create a CloudFormation stack named `managed-flink-code-bucket` as defined [here](./aws-cloudformation/managed-flink-poc-bucket.yaml), +To create the bucket, create a CloudFormation stack named `flink-cf-demo-bucket` as defined [here](./aws-cloudformation/flink-cf-demo-bucket-stack.yaml), and after that finishes, use the AWS CLI to upload the jar file: ```shell export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account -aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \ - s3://managed-flink-poc-bucket-codebucket-${AWS_ACCOUNT_ID}/ +aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://flink-cf-demo-bucket-${AWS_ACCOUNT_ID}/ ``` #### Create the Kinesis streams, Managed Flink application, and related AWS Resources -Create a CloudFormation stack named `managed-flink-poc` as defined by the CloudFormation templates [here](./aws-cloudformation/managed-flink-poc.yaml). +Create a CloudFormation stack named `flink-cf-demo` as defined by the CloudFormation templates [here](./aws-cloudformation/flink-cf-demo-stack.yaml). This stack includes a custom resource lambda that programmatically configures logging when the Flink application is created, and transitions the application from the Ready to Running state. #### Monitor the CloudWatch logging output +The following script will show all the log entries from the start of application launch, and will +wait for new entries to arrive and display them too. The script will resume from where it +left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory. ```shell -./poc-tail-logs.sh +./demo-tail-logs.sh ``` -This script will show all the log entries from the start of application launch, and will -wait for new entries to arrive and display them too. The script will resume from where it -left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory. + ### Send sample events to the ingress stream ```shell -./poc-send-events.sh +./demo-send-events.sh ``` #### Get and display the events published to the egress stream +This script will show all events published to the egress stream since the start of application launch, and will +wait for new entries to arrive and display them too. ```shell -./poc-get-events.sh +./demo-tail-egress.sh ``` +#### Cleanup +Cleanup by manually deleting the jar file from the S3 bucket and the ingress Kinesis stream. Then delete the +Cloud Formation stacks. Cloud Formation will fail to delete a non-empty bucket, and fail to delete the ingress Kinesis +stream since Flink adds a fanout consumer to the stream which will block the deletion attempted by +Cloud Formation. ### Provisioning via Terraform @@ -207,23 +214,51 @@ cd aws-terraform terraform init terraform apply # When prompted, enter 'yes' ``` -Immediately after entering 'yes' to the prompt issued by `terraform apply`, switch to another shell/terminal tab and upload the application JAR file to the S3 bucket. The upload may fail if the S3 bucket has not been created by Terraform yet, so keep trying until it succeeds. +Immediately after entering 'yes' to the prompt issued by `terraform apply`, switch to another shell/terminal tab and +upload the application JAR file to the S3 bucket. 
The upload may fail if the S3 bucket has not been created by +Terraform yet, so keep trying until it succeeds. ```shell export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \ s3://flink-demo-bucket-${AWS_ACCOUNT_ID}/ ``` +Wait for the `terraform apply` command to complete. -Follow the directions near the end of the Crossplane section, below, regarding sending sample events. Use the scripts in the `aws-crossplane` directory to send the sample input events, get the events written to the egress stream, and view the Flink application logging output. +#### Monitor the CloudWatch logging output -Cleanup by deleting the jar file from the S3 bucket, `flink-demo-bucket-${AWS_ACCOUNT_ID}` and running the command: +The following script will show all the log entries from the start of application launch, and will +wait for new entries to arrive and display them too. The script will resume from where it +left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory. +```shell +./demo-tail-logs.sh +``` +#### Send sample events to the ingress stream ```shell -terraform destroy # When prompted, enter 'yes' +./demo-send-events.sh ``` -The Kinesis stream `flink-demo-ingress` must be manually deleted since Flink adds a Fanout consumer to the stream, and the consumer will block deletion. +#### Get and display the events published to the egress stream +This script will show all events published to the egress stream since the start of application launch, and will +wait for new entries to arrive and display them too. +```shell +./demo-tail-egress.sh +``` + +#### Cleanup +Cleanup by manually deleting the jar file from the S3 bucket, `flink-demo-bucket-${AWS_ACCOUNT_ID}`, and the Kinesis +stream `flink-tf-demo-ingress`. Run the `terraform destroy` command. Note that the manual deletions are required +since Terraform can't delete a non-empty bucket, and can't delete the ingress stream since Flink adds a fanout consumer +to the stream which will block the deletion attempted by Terraform. + +Alternatively, you can run the following commands to clean up the resources: +```shell +export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account +aws s3 rm --recursive s3://flink-tf-demo-bucket-${AWS_ACCOUNT_ID}/ +aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-tf-demo-ingress +terraform destroy # When prompted, enter 'yes' +``` ### Provisioning via Crossplane @@ -239,56 +274,20 @@ This demo of provisioning via Crossplane is nowhere near production quality. It to provision and run an AWS Managed Flink application via crossplane. Many tasks normally performed via CI/CD must be completed manually as described below. The crossplane compositions currently use `function-patch-and-transform` instead of a custom composition function, and because of that, many things in the compositions remain hard-coded (AWS account -number, region, ARNs in IAM roles, etc). In production systems, the lambda and related infrastructure that auto-starts -the Flink application probably only needs to be installed once per AWS account, and as such those resources should be -provisioned via a separate claim. Also, see my note below regarding the creation of a CloudWatch log group for the lambda. +number, region, ARNs in IAM roles, etc). #### Instructions The files to run the crossplane demo are in the [aws-crossplane](./aws-crossplane) directory. 
 -
-Skip the instructions for using a lambda to start the Flink application. Go to [here instead](#start-the-local-idp-configured-to-use-aws)
-
-##### Build the lambda handler package. What? A lambda?
-
-> :warning: IMPORTANT: Using the lambda is optional and not recommended.
-
-> At one point it appeared that the managed resource for Flink wouldn't start the Flink application, and that like the CloudWatch
-approach, a lambda is needed to handle events from the AWS resource and transition the application to the `Running` state. This
-is not actually the case, but support for the lambda is still included. To use the lambda, it must be packaged and uploaded to the
-S3 bucket, and the managed flink claim must select the 'lambda' composition.
-
-> When using the [flink-lambda](./aws-crossplane/resources/flink/flink-lambda-comp.yaml) composition,
-the [managed resource for creating AWS Managed Flink applications](https://marketplace.upbound.io/providers/upbound/provider-aws-kinesisanalyticsv2/v1.17.0/resources/kinesisanalyticsv2.aws.upbound.io/Application/v1beta1)
-will do most of the work to get the Flink application provisioned, and the application will
-become 'Ready' (not 'Running'). In this case, the lambda will
-invoke an API call to start the application. This is in following with how it works when provisioning via CloudFormation.
-In CloudFormation though, the Lambda code can be inlined in a CloudFormation template, but in Crossplane the Lambda code must be
-referenced separately, e.g., via reference to the lambda package in an S3 file.
-
-> Build the lambda package by following [the instructions here](./aws-crossplane/start-flink-lambda/README.md). The resulting Zip file will be
-uploaded to S3 later, as you follow the steps below.
-
-> The lambda will be provisioned along with AWS Managed Flink via a single claim, below.
-
-##### Create the CloudWatch log group for the lambda
-
-> :warning: OPTIONAL: Follow these instructions only if you are using the lambda to start the Flink application
-
-Login to AWS Identity Center and launch the web console for the Sandbox account.
-
-Confirm the existence of, and create if necessary, the CloudWatch log group `/aws/lambda/flink-demo-starter`. I can't
-figure out how to do this using the managed resource provided by `provider-aws-cloudwatchlogs` because the log group
-for the lambda must be named exactly that, the MR doesn't provide a way to set the name explicitly, and k8s/crossplane
-doesn't like the slashes in `metadata.name`.
-
-##### Start the local IDP configured to use AWS

```
cd aws-crossplane
```

+
+##### Start the local IDP configured to use AWS
+
+Login to AWS Identity Center, and copy the AWS credential environment variable commands from the Access Keys page.

 Paste and execute the AWS environment variable commands, then run this script:

@@ -299,10 +298,11 @@ Paste and execute the AWS environment variable commands, then run this script:

 Launch the local IDP using idpbuilder (https://github.com/cnoe-io/idpbuilder)

```
-idpbuilder create -p ./local/aws
+idpbuilder create -p ./cloud/aws
```

-The `idpbuilder create` command takes a few minutes to complete, and even then it will take more time for crossplane to start and the providers to be loaded.
+The `idpbuilder create` command takes a few minutes to complete, and even then it will take more time for crossplane to
+start and the providers to be loaded.
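+
+If you would rather not poll the pods by hand (the manual check is shown next), you can block until the providers
+report healthy. A minimal sketch, assuming the AWS providers were installed as Crossplane `Provider` package
+resources (which is how the idpbuilder configuration above loads them):
+```shell
+# Illustrative only: wait for every installed Crossplane provider to become Healthy.
+kubectl wait providers.pkg.crossplane.io --all --for=condition=Healthy --timeout=10m
+```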
Wait for the AWS providers to finish loading...

@@ -313,13 +313,15 @@ kubectl -n crossplane-system get pods | grep provider-aws

 Wait until the command above returns a list of pods all in the `Running` state.

 ##### Install the Crossplane resources (XRDs and Compositions)
-Install the Composite Resource Definitions and Compositions required by the demo. Ignore the warnings issued by the following command:
+Install the Composite Resource Definitions and Compositions required by the demo. Ignore the warnings issued by the
+following command:

```
-for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do k apply -f $i; done
+for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do kubectl apply -f $i; done
```

-At the time of this writing the demo does not utilize a custom composition function. Instead, it uses the off-the-shelf function `function-patch-and-transform` which gets loaded during IDP creation, above.
+At the time of this writing the demo does not utilize a custom composition function. Instead, it uses the off-the-shelf
+function `function-patch-and-transform` which gets loaded during IDP creation, above.

##### Provision AWS Managed Flink via Crossplane claims

@@ -334,57 +336,70 @@ kubectl get managed
```
 The output of `kubectl get managed` will reveal the actual S3 bucket name under `EXTERNAL-NAME`.

-Return to AWS Identity Center and launch the web console for the Sandbox account.
+Return to AWS Identity Center and launch the web console for the account.

-Visit the S3 services page. Find the S3 bucket (flink-demo-bucket-*) and upload the following files to the bucket
+Visit the S3 services page. Find the S3 bucket (flink-cp-demo-bucket-*) and upload the following file to the bucket

 - `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code)
-- `start-flink-lambda/start_flink_py.zip` (Optional, lambda handler code which transitions the Managed Flink instance to the 'Running' state)

-Alternatively, use the AWS CLI to upload the files...
+Alternatively, use the AWS CLI to upload the file...

```
 flink_bucket_name=$(kubectl get managed | grep bucket | awk '{print $4}')
-aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/my-stateful-functions-embedded-java-3.3.0.jar
-
-# If using the lambda to start the Flink application, upload the lambda package
-aws s3 cp start-flink-lambda/start_flink_py.zip s3://${flink_bucket_name}/start_flink_py.zip
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/
```

##### Provision the Managed Flink application

-Applying the following claim will trigger the creation of the Flink application, its role, and log groups. Note that by default Flink application will become 'Ready' since `startApplication: true` is commented-out in the claim.
-
-To use the lambda to start the Flink application, update the file `claims/managed-flink-claim.yaml` and change the value for `appReadyHandler` to `lambda`.
+Apply the following claim to trigger the creation of the Flink application, its role, and log groups. Note that by
+default, the Flink application will become 'Ready' since `startApplication: true` is commented-out in the claim. Do not
+uncomment this line yet.

```
kubectl apply -f claims/managed-flink-claim.yaml
```

-If using the lambda to start the Flink application, no further action should be required. Otherwise, visit the AWS Managed Flink applications page in the web console.
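+Instead of watching the web console, you can poll the application status from the CLI; a sketch (assuming the
+composition names the application `<claim name>-application`, i.e. `flink-cp-demo-application` here):
+```shell
+aws kinesisanalyticsv2 describe-application --application-name flink-cp-demo-application \
+    --query 'ApplicationDetail.ApplicationStatus' --output text
+```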
When the application statis is `Ready`, then uncomment the `startAppication: true` line in the `managed-flink-claim.yaml` file and re-run the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few minutes, and so it switches back and forth between `Running` and `Updating` :(
+Visit the AWS Managed Flink applications page in the web console. When the application status becomes `Ready`,
+uncomment the `startApplication: true` line in the `managed-flink-claim.yaml` file and re-run
+the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed
+with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few
+minutes, and so it switches back and forth between `Running` and `Updating` :(

-Wait until the Flink application is in the 'Running' state, then execute the following commands to send events and see the results:
+Wait until the Flink application is in the 'Running' state. This may take a few minutes.

+#### Monitor the CloudWatch logging output
+
+The following script will show all the log entries from the start of application launch, and will
+wait for new entries to arrive and display them too. The script will resume from where it
+left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory.
+```shell
+./demo-tail-logs.sh
+```
+
+#### Send sample events to the ingress stream
+```shell
+./demo-send-events.sh
 ```
-# Send the test events
-./poc-send-events.sh

-# Fetch and display the results from the egress stream
-./poc-get-events.sh
+#### Get and display the events published to the egress stream
+This script will show all events published to the egress stream since the start of application launch, and will
+wait for new entries to arrive and display them too.
+```shell
+./demo-tail-egress.sh
```

#### Cleanup
+Manually delete the files in the S3 bucket, and delete the Kinesis stream `flink-cp-demo-ingress` (the Flink application
+adds a fanout consumer to the stream which will block any deletion attempted by Crossplane).
+
+Run the following commands to perform the manual deletions and then delete the remaining resources:
```
+# Empty the code bucket and remove the ingress stream first:
+aws s3 rm --recursive s3://$(kubectl get managed | grep bucket | awk '{print $4}')/
+aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-cp-demo-ingress
-kubectl delete -f resources/claims/managed-flink-claims.yaml
-kubectl delete -f resources/claims/demo-setup-claims.yaml
+kubectl delete -f claims/managed-flink-claim.yaml
+kubectl delete -f claims/demo-setup-claims.yaml
```

-Visit the S3 bucket in the web console and delete the files in the bucket. Having issued the `kubectl delete` command on the demo setup claims will trigger the bucket to be deleted automatically soon after it is emptied.
-
 Shut down the local IDP with the command:
```
idpbuilder delete
```
-Manually remove the CloudWatch log group `/aws/lambda/flink-demo-starter`.
-
diff --git a/aws-cloudformation/poc-send-events.sh b/aws-cloudformation/demo-send-events.sh
similarity index 67%
rename from aws-cloudformation/poc-send-events.sh
rename to aws-cloudformation/demo-send-events.sh
index 9d874ed..7699690 100755
--- a/aws-cloudformation/poc-send-events.sh
+++ b/aws-cloudformation/demo-send-events.sh
@@ -1,9 +1,15 @@
 #!
/bin/bash -stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep ManagedFlinkIngressStream) +if [ $(uname) = "Darwin" ]; then + MD5SUM=md5 +else + MD5SUM=md5sum +fi + +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep FlinkCfDemoIngressStream) grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do - partkey=$(echo $line | md5sum | awk '{print $1}') + partkey=$(echo $line | $MD5SUM | awk '{print $1}') data=$(echo $line | base64) cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data" echo $cmd diff --git a/aws-cloudformation/demo-tail-egress.sh b/aws-cloudformation/demo-tail-egress.sh new file mode 100755 index 0000000..f198bb8 --- /dev/null +++ b/aws-cloudformation/demo-tail-egress.sh @@ -0,0 +1,25 @@ +#! /bin/bash + +set -e + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep FlinkCfDemoEgressStream) + +get_records_response=$(mktemp) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +while [ "true" ]; do + aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response + shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator) + record_count=0 + for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do + record_count=$(expr $record_count + 1) + echo $encoded_data | base64 -d | jq . + done + if [ $record_count -eq 0 ]; then + sleep 2 + fi +done + + diff --git a/aws-crossplane/poc-tail-logs.sh b/aws-cloudformation/demo-tail-logs.sh similarity index 84% rename from aws-crossplane/poc-tail-logs.sh rename to aws-cloudformation/demo-tail-logs.sh index faa7ed0..a2009fe 100755 --- a/aws-crossplane/poc-tail-logs.sh +++ b/aws-cloudformation/demo-tail-logs.sh @@ -18,11 +18,11 @@ fi while true; do CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json aws logs get-log-events \ - --start-from-head \ - $NEXT_TOKEN_ARG \ - --log-group-name flink-demo-app-log-group \ - --log-stream-name flink-demo-app-log-stream \ - >$CWLOG_FILE + --start-from-head \ + $NEXT_TOKEN_ARG \ + --log-group-name FlinkCfDemoLogGroup \ + --log-stream-name FlinkCfDemoLogStream \ + >$CWLOG_FILE NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) echo $NEXT_TOKEN >$CWLOGS_DIR/next.token diff --git a/aws-cloudformation/managed-flink-poc-bucket.yaml b/aws-cloudformation/flink-cf-demo-bucket-stack.yaml similarity index 84% rename from aws-cloudformation/managed-flink-poc-bucket.yaml rename to aws-cloudformation/flink-cf-demo-bucket-stack.yaml index ce52994..9fc4e11 100644 --- a/aws-cloudformation/managed-flink-poc-bucket.yaml +++ b/aws-cloudformation/flink-cf-demo-bucket-stack.yaml @@ -5,7 +5,7 @@ Resources: CodeBucket: Type: AWS::S3::Bucket Properties: - BucketName: !Sub ${AWS::StackName}-codebucket-${AWS::AccountId} + BucketName: !Sub ${AWS::StackName}-${AWS::AccountId} BucketEncryption: ServerSideEncryptionConfiguration: - ServerSideEncryptionByDefault: @@ -29,7 +29,7 @@ Resources: Bool: aws:SecureTransport: 'false' Outputs: - ManagedFlinkCodeBucketArn: + FlinkCfDemoCodeBucketArn: Value: !GetAtt CodeBucket.Arn Export: - Name: ManagedFlinkCodeBucketArn # Exported for use by the stack defined in managed-flink-poc.yaml + Name: FlinkCfDemoCodeBucketArn # Exported for use by the stack 
defined in flink-cf-demo-stack.yaml diff --git a/aws-cloudformation/managed-flink-poc.yaml b/aws-cloudformation/flink-cf-demo-stack.yaml similarity index 83% rename from aws-cloudformation/managed-flink-poc.yaml rename to aws-cloudformation/flink-cf-demo-stack.yaml index 46ad4c6..2073ed2 100644 --- a/aws-cloudformation/managed-flink-poc.yaml +++ b/aws-cloudformation/flink-cf-demo-stack.yaml @@ -1,7 +1,7 @@ # A CloudFormation stack containing all resources except for the S3 bucket containing the statefun application JAR. Description: "Stack to run Managed Flink proof-of-concept. Contact: Ken Ellinwood" Resources: - ManagedFlinkIngressStream: + FlinkCfDemoIngressStream: Type: AWS::Kinesis::Stream Properties: ShardCount: 1 @@ -10,7 +10,7 @@ Resources: KeyId: alias/aws/kinesis StreamModeDetails: StreamMode: PROVISIONED - ManagedFlinkEgressStream: + FlinkCfDemoEgressStream: Type: AWS::Kinesis::Stream Properties: ShardCount: 1 @@ -19,24 +19,22 @@ Resources: KeyId: alias/aws/kinesis StreamModeDetails: StreamMode: PROVISIONED - ManagedFlinkLogGroup: + FlinkCfDemoLogGroup: Type: 'AWS::Logs::LogGroup' Properties: - LogGroupName: - !Sub ${AWS::StackName}-log-group-${AWS::AccountId} + LogGroupName: FlinkCfDemoLogGroup RetentionInDays: 7 UpdateReplacePolicy: Delete DeletionPolicy: Delete - ManagedFlinkLogStream: + FlinkCfDemoLogStream: Type: 'AWS::Logs::LogStream' Properties: LogGroupName: - Ref: ManagedFlinkLogGroup - LogStreamName: - !Sub ${AWS::StackName}-log-stream-${AWS::AccountId} + Ref: FlinkCfDemoLogGroup + LogStreamName: FlinkCfDemoLogStream UpdateReplacePolicy: Delete DeletionPolicy: Delete - ManagedFlinkIAMRole: + FlinkCfDemoIAMRole: Type: AWS::IAM::Role Properties: AssumeRolePolicyDocument: @@ -63,10 +61,10 @@ Resources: Effect: Allow Resource: - 'Fn::GetAtt': - - ManagedFlinkIngressStream + - FlinkCfDemoIngressStream - Arn - 'Fn::GetAtt': - - ManagedFlinkEgressStream + - FlinkCfDemoEgressStream - Arn Version: '2012-10-17' PolicyName: AccessKDSPolicy @@ -79,7 +77,7 @@ Resources: Effect: Allow Resource: 'Fn::GetAtt': - - ManagedFlinkLogGroup + - FlinkCfDemoLogGroup - Arn Version: '2012-10-17' PolicyName: AccessCWLogsPolicy @@ -90,26 +88,26 @@ Resources: Resource: '*' Version: '2012-10-17' PolicyName: AccessCWMetricsPolicy - ManagedFlinkApplication: + FlinkCfDemoApplication: Type: AWS::KinesisAnalyticsV2::Application Properties: - ApplicationName: 'ManagedFlinkPOCApplication' - ApplicationDescription: 'Managed Flink POC Application' + ApplicationName: !Sub ${AWS::StackName}-application + ApplicationDescription: 'Managed Flink demo application provisioned via CloudFormation' RuntimeEnvironment: 'FLINK-1_18' - ServiceExecutionRole: !GetAtt ManagedFlinkIAMRole.Arn + ServiceExecutionRole: !GetAtt FlinkCfDemoIAMRole.Arn ApplicationConfiguration: EnvironmentProperties: PropertyGroups: - PropertyGroupId: 'StatefunApplicationProperties' PropertyMap: - EVENTS_INGRESS_STREAM_DEFAULT: !Ref ManagedFlinkIngressStream - EVENTS_EGRESS_STREAM_DEFAULT: !Ref ManagedFlinkEgressStream + EVENTS_INGRESS_STREAM_DEFAULT: !Ref FlinkCfDemoIngressStream + EVENTS_EGRESS_STREAM_DEFAULT: !Ref FlinkCfDemoEgressStream AWS_REGION: !Ref AWS::Region FlinkApplicationConfiguration: CheckpointConfiguration: ConfigurationType: 'CUSTOM' CheckpointingEnabled: True - CheckpointInterval: 900000 # Every fifteen minutes + CheckpointInterval: 60000 # Every minute # Increase this to 300000 in production (every 5 minutes) MinPauseBetweenCheckpoints: 500 MonitoringConfiguration: ConfigurationType: 'CUSTOM' @@ -125,26 +123,26 @@ Resources: 
ApplicationCodeConfiguration:
          CodeContent:
            S3ContentLocation:
-             BucketARN: !ImportValue ManagedFlinkCodeBucketArn # Created and exported by the stack defined in managed-flink-poc-bucket.yaml
-             FileKey: "my-stateful-functions-embedded-java-3.3.0.jar"
+             BucketARN: !ImportValue FlinkCfDemoCodeBucketArn # Created and exported by the stack defined in flink-cf-demo-bucket-stack.yaml
+             FileKey: "my-stateful-functions-embedded-java-3.3.0.jar"
           CodeContentType: 'ZIPFILE'
-  ManagedFlinkCustomResource:
-    Description: Invokes ManagedFlinkCRLambda to update and start the Flink application via API calls
+  FlinkCfDemoCustomResource:
+    Description: Invokes FlinkCfDemoCRLambda to update and start the Flink application via API calls
     Type: AWS::CloudFormation::CustomResource
-    DependsOn: ManagedFlinkCRLambda
+    DependsOn: FlinkCfDemoCRLambda
     Version: "1.0"
     Properties:
-      ServiceToken: !GetAtt ManagedFlinkCRLambda.Arn
+      ServiceToken: !GetAtt FlinkCfDemoCRLambda.Arn
       Region: !Ref AWS::Region
-      ApplicationName: !Ref ManagedFlinkApplication
+      ApplicationName: !Ref FlinkCfDemoApplication
       # LogStream ARN format: arn:aws:logs:REGION:ACCOUNT_NUMBER:log-group:LOG_GROUP_NAME:log-stream:LOG_STREAM_NAME
       # We get most of this from the LogGroup ARN, then remove the trailing "*" and append "log-stream:LOG_STREAM_NAME"
-      LogStreamArn: !Join [ "", [ !Select [ 0, !Split [ "*", !GetAtt ManagedFlinkLogGroup.Arn ] ], "log-stream:", !Ref ManagedFlinkLogStream ] ]
-  ManagedFlinkCRLambdaRole:
+      LogStreamArn: !Join [ "", [ !Select [ 0, !Split [ "*", !GetAtt FlinkCfDemoLogGroup.Arn ] ], "log-stream:", !Ref FlinkCfDemoLogStream ] ]
+  FlinkCfDemoCRLambdaRole:
     Type: AWS::IAM::Role
     DependsOn:
-      - ManagedFlinkApplication
-      - ManagedFlinkLogStream
+      - FlinkCfDemoApplication
+      - FlinkCfDemoLogStream
     Properties:
       Description: A role for the custom resource lambda to use while interacting with an application.
       AssumeRolePolicyDocument:
@@ -160,13 +158,13 @@
         - arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess
         - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess
       Path: /
-  ManagedFlinkCRLambda:
+  FlinkCfDemoCRLambda:
     Type: AWS::Lambda::Function
-    DependsOn: ManagedFlinkCRLambdaRole
+    DependsOn: FlinkCfDemoCRLambdaRole
     Properties:
       Description: Configures logging and starts the Flink application
       Runtime: python3.8
-      Role: !GetAtt ManagedFlinkCRLambdaRole.Arn
+      Role: !GetAtt FlinkCfDemoCRLambdaRole.Arn
       Handler: index.lambda_handler
       Timeout: 30
       Code:
diff --git a/aws-cloudformation/poc-get-events.sh b/aws-cloudformation/poc-get-events.sh
deleted file mode 100755
index b1291d5..0000000
--- a/aws-cloudformation/poc-get-events.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/bash
-
-# Get the events sent to the egress stream
-stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep ManagedFlinkEgressStream)
-
-shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId)
-shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator)
-for encoded_data in $(aws kinesis get-records --shard-iterator $shard_iterator | jq -crM .Records[].Data); do
-  echo $encoded_data | base64 -d | jq .
-done
diff --git a/aws-crossplane/NOTES.md b/aws-crossplane/NOTES.md
deleted file mode 100644
index e90db68..0000000
--- a/aws-crossplane/NOTES.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-I initially put the stream ARN values in the environment section of (the managed flink claim)[./claims/mananged-flink-claim.yaml].
-Just the plain stream names are required, however after updating the values in the claim and applying the change, I see this -error in the output of `kubectl describe application.kinesisanalyticsv2.aws.upbound.io/flink-demo-application`... - -``` -Warning CannotUpdateExternalResource 4m19s (x14 over 6m31s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application -(combined from similar events): async update failed: failed to update the resource: [{0 updating Kinesis Analytics v2 Application -(arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application): operation error Kinesis Analytics V2: -UpdateApplication, https response error StatusCode: 400, RequestID: 39586af4-c1cc-4515-b818-c86f8f176671, -InvalidApplicationConfigurationException: Failed to take snapshot for the application flink-demo-application at this moment. -The application is currently experiencing downtime. Please check the application's CloudWatch metrics or CloudWatch -logs for any possible errors and retry the request. You can also retry the request after disabling the snapshots in -the Kinesis Data Analytics console or by updating the ApplicationSnapshotConfiguration through the AWS SDK. []}] -``` - -It appears that the snapshot issue is preventing the update that would fix the snapshot issue :( - - -I then tried to delete the claim and re-apply it as soon as the managed resources disappeared and the Flink app no longer -showed in the AWS console, but the new app got stuck on this: - -``` -Warning CannotCreateExternalResource 51s (x39 over 4m41s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application -(combined from similar events): async create failed: failed to create the resource: [{0 creating Kinesis Analytics v2 Application -(flink-demo-application): operation error Kinesis Analytics V2: CreateApplication, https response error StatusCode: 400, -RequestID: 64366786-9f40-440f-8fcd-c3376f0cc619, ConcurrentModificationException: Tags are already registered for this -resource ARN: arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application, please retry later. -Or you can create without tags and then add tags using TagResource API after successful resource creation. []}] -``` - -Third try after waiting longer between delete and apply... - -``` -Normal UpdatedExternalResource 99s (x2 over 5m18s) managed/kinesisanalyticsv2.aws.upbound.io/v1beta1, kind=application Successfully requested update of external resource -``` - -But in the AWS Console, the app seems stuck with the 'Updating' status. OK, waited a bit and it's now 'Running', except... 
- -``` -{ - "applicationARN": "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application", - "applicationVersionId": "3", - "locationInformation": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException(MultiObjectDeleteSupport.java:107)", - "logger": "org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport", - "message": "AccessDenied: b97af75851aadd301cb2f64ad11c0ef0-516535517513-1733357577755/: User: arn:aws:sts::695788120607:assumed-role/AWSKinesisAnalyticsKubern-S3CustomerAppStateAccess-LZ083MW7K490/FlinkApplicationStateSession is not authorized to perform: s3:DeleteObject on resource: \"arn:aws:s3:::cc75a9b61f353980b2f0360aaee434149a950968/b97af75851aadd301cb2f64ad11c0ef0-516535517513-1733357577755/\" because no session policy allows the s3:DeleteObject action\n", - "messageSchemaVersion": "1", - "messageType": "WARN", - "threadName": "s3a-transfer-cc75a9b61f353980b2f0360aaee434149a950968-unbounded-pool2-t16" -} -``` - -And were back on 'Updating' status w/o doing anything except to go look at the logs, where I saw an error about not -having permissions to delete objects from S3, and while I was typing this it went back to 'Running' status. diff --git a/aws-crossplane/claims/demo-setup-claims.yaml b/aws-crossplane/claims/demo-setup-claims.yaml index 23761d4..85ece87 100644 --- a/aws-crossplane/claims/demo-setup-claims.yaml +++ b/aws-crossplane/claims/demo-setup-claims.yaml @@ -2,7 +2,7 @@ apiVersion: kellinwood.com/v1alpha1 kind: S3Bucket metadata: - name: flink-demo-bucket + name: flink-cp-demo-bucket namespace: default spec: resourceConfig: @@ -11,12 +11,12 @@ spec: apiVersion: kellinwood.com/v1alpha1 kind: KinesisStream metadata: - name: flink-demo-ingress + name: flink-cp-demo-ingress namespace: default spec: resourceConfig: region: us-east-2 - name: flink-demo-ingress + name: flink-cp-demo-ingress streamMode: PROVISIONED shardCount: 1 retentionPeriod: 26 @@ -27,12 +27,12 @@ spec: apiVersion: kellinwood.com/v1alpha1 kind: KinesisStream metadata: - name: flink-demo-egress + name: flink-cp-demo-egress namespace: default spec: resourceConfig: region: us-east-2 - name: flink-demo-egress + name: flink-cp-demo-egress streamMode: PROVISIONED shardCount: 1 retentionPeriod: 26 diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml index e4d033b..ae4fad1 100644 --- a/aws-crossplane/claims/managed-flink-claim.yaml +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -1,13 +1,13 @@ apiVersion: kellinwood.com/v1alpha1 kind: ManagedFlink metadata: - name: flink-demo + name: flink-cp-demo namespace: default spec: resourceConfig: region: us-east-2 - name: flink-demo - codeBucket: flink-demo-bucket + name: flink-cp-demo + codeBucket: flink-cp-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 # startApplication: true @@ -16,9 +16,6 @@ spec: - propertyGroup: - propertyGroupId: StatefunApplicationProperties propertyMap: - EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress - EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress + EVENTS_INGRESS_STREAM_DEFAULT: flink-cp-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: flink-cp-demo-egress AWS_REGION: us-east-2 - compositionSelector: - matchLabels: - appReadyHandler: none # or maybe 'lambda'. 
See ../../README.md#build-the-lambda-handler-package--what-a-lambda \ No newline at end of file diff --git a/aws-crossplane/local/aws/cert-creation/job.yaml b/aws-crossplane/cloud/aws/cert-creation/job.yaml similarity index 100% rename from aws-crossplane/local/aws/cert-creation/job.yaml rename to aws-crossplane/cloud/aws/cert-creation/job.yaml diff --git a/aws-crossplane/local/aws/crossplane.yaml b/aws-crossplane/cloud/aws/crossplane.yaml similarity index 100% rename from aws-crossplane/local/aws/crossplane.yaml rename to aws-crossplane/cloud/aws/crossplane.yaml diff --git a/aws-crossplane/local/aws/manifests/aws-services.yaml b/aws-crossplane/cloud/aws/manifests/aws-services.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/aws-services.yaml rename to aws-crossplane/cloud/aws/manifests/aws-services.yaml diff --git a/aws-crossplane/local/aws/manifests/core.yaml b/aws-crossplane/cloud/aws/manifests/core.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/core.yaml rename to aws-crossplane/cloud/aws/manifests/core.yaml diff --git a/aws-crossplane/local/aws/manifests/credentials.yaml b/aws-crossplane/cloud/aws/manifests/credentials.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/credentials.yaml rename to aws-crossplane/cloud/aws/manifests/credentials.yaml diff --git a/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml b/aws-crossplane/cloud/aws/manifests/patch-and-transform-function.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml rename to aws-crossplane/cloud/aws/manifests/patch-and-transform-function.yaml diff --git a/aws-crossplane/local/aws/manifests/provider-config.yaml b/aws-crossplane/cloud/aws/manifests/provider-config.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/provider-config.yaml rename to aws-crossplane/cloud/aws/manifests/provider-config.yaml diff --git a/aws-crossplane/local/aws/manifests/secret.yaml b/aws-crossplane/cloud/aws/manifests/secret.yaml similarity index 100% rename from aws-crossplane/local/aws/manifests/secret.yaml rename to aws-crossplane/cloud/aws/manifests/secret.yaml diff --git a/aws-crossplane/local/aws/providers.yaml b/aws-crossplane/cloud/aws/providers.yaml similarity index 100% rename from aws-crossplane/local/aws/providers.yaml rename to aws-crossplane/cloud/aws/providers.yaml diff --git a/aws-crossplane/local/aws/update_credentials.sh b/aws-crossplane/cloud/aws/update_credentials.sh similarity index 100% rename from aws-crossplane/local/aws/update_credentials.sh rename to aws-crossplane/cloud/aws/update_credentials.sh diff --git a/aws-crossplane/local/localstack/cert-creation/job.yaml b/aws-crossplane/cloud/localstack/cert-creation/job.yaml similarity index 100% rename from aws-crossplane/local/localstack/cert-creation/job.yaml rename to aws-crossplane/cloud/localstack/cert-creation/job.yaml diff --git a/aws-crossplane/local/localstack/configs/function.yaml b/aws-crossplane/cloud/localstack/configs/function.yaml similarity index 100% rename from aws-crossplane/local/localstack/configs/function.yaml rename to aws-crossplane/cloud/localstack/configs/function.yaml diff --git a/aws-crossplane/local/localstack/configs/local-secret.yaml b/aws-crossplane/cloud/localstack/configs/local-secret.yaml similarity index 100% rename from aws-crossplane/local/localstack/configs/local-secret.yaml rename to aws-crossplane/cloud/localstack/configs/local-secret.yaml diff --git 
a/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml b/aws-crossplane/cloud/localstack/configs/provider-config-localstack.yaml similarity index 100% rename from aws-crossplane/local/localstack/configs/provider-config-localstack.yaml rename to aws-crossplane/cloud/localstack/configs/provider-config-localstack.yaml diff --git a/aws-crossplane/local/localstack/configs/providers.yaml b/aws-crossplane/cloud/localstack/configs/providers.yaml similarity index 100% rename from aws-crossplane/local/localstack/configs/providers.yaml rename to aws-crossplane/cloud/localstack/configs/providers.yaml diff --git a/aws-crossplane/local/localstack/configs/services.yaml b/aws-crossplane/cloud/localstack/configs/services.yaml similarity index 100% rename from aws-crossplane/local/localstack/configs/services.yaml rename to aws-crossplane/cloud/localstack/configs/services.yaml diff --git a/aws-crossplane/local/localstack/crossplane-configs.yaml b/aws-crossplane/cloud/localstack/crossplane-configs.yaml similarity index 100% rename from aws-crossplane/local/localstack/crossplane-configs.yaml rename to aws-crossplane/cloud/localstack/crossplane-configs.yaml diff --git a/aws-crossplane/local/localstack/crossplane.yaml b/aws-crossplane/cloud/localstack/crossplane.yaml similarity index 100% rename from aws-crossplane/local/localstack/crossplane.yaml rename to aws-crossplane/cloud/localstack/crossplane.yaml diff --git a/aws-crossplane/local/localstack/localstack.yaml b/aws-crossplane/cloud/localstack/localstack.yaml similarity index 100% rename from aws-crossplane/local/localstack/localstack.yaml rename to aws-crossplane/cloud/localstack/localstack.yaml diff --git a/aws-crossplane/port-forward-idp-localstack.sh b/aws-crossplane/cloud/localstack/port-forward-idp-localstack.sh similarity index 74% rename from aws-crossplane/port-forward-idp-localstack.sh rename to aws-crossplane/cloud/localstack/port-forward-idp-localstack.sh index f651c8e..6fd2316 100644 --- a/aws-crossplane/port-forward-idp-localstack.sh +++ b/aws-crossplane/cloud/localstack/port-forward-idp-localstack.sh @@ -1,5 +1,6 @@ #! /bin/sh +# Port-forward port 4566 to the LocalStack service running in the Kubernetes cluster # This script requires the jq command: https://jqlang.github.io/jq/ localstack_pod_name=$(kubectl -n localstack get pods -o json | jq -cr .items[0].metadata.name) diff --git a/aws-crossplane/poc-send-events.sh b/aws-crossplane/demo-send-events.sh similarity index 94% rename from aws-crossplane/poc-send-events.sh rename to aws-crossplane/demo-send-events.sh index 96d2706..183e2b4 100755 --- a/aws-crossplane/poc-send-events.sh +++ b/aws-crossplane/demo-send-events.sh @@ -3,7 +3,7 @@ set -e MD5CMD=md5 -stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-demo-ingress) +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-cp-demo-ingress) if [ -z "$stream_name" ]; then echo "Stream not found" exit 1 diff --git a/aws-crossplane/demo-tail-egress.sh b/aws-crossplane/demo-tail-egress.sh new file mode 100755 index 0000000..566e659 --- /dev/null +++ b/aws-crossplane/demo-tail-egress.sh @@ -0,0 +1,23 @@ +#! 
/bin/bash + +set -e + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-cp-demo-egress) + +get_records_response=$(mktemp) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +while [ "true" ]; do + aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response + shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator) + record_count=0 + for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do + record_count=$(expr $record_count + 1) + echo $encoded_data | base64 -d | jq . + done + if [ $record_count -eq 0 ]; then + sleep 2 + fi +done diff --git a/aws-cloudformation/poc-tail-logs.sh b/aws-crossplane/demo-tail-logs.sh similarity index 78% rename from aws-cloudformation/poc-tail-logs.sh rename to aws-crossplane/demo-tail-logs.sh index 2a326e5..f1c4d5a 100755 --- a/aws-cloudformation/poc-tail-logs.sh +++ b/aws-crossplane/demo-tail-logs.sh @@ -4,7 +4,6 @@ set -e cd $(dirname $0) -AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID:-516535517513} NEXT_TOKEN_ARG= CWLOGS_DIR=.cwlogs @@ -19,11 +18,11 @@ fi while true; do CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json aws logs get-log-events \ - --start-from-head \ - $NEXT_TOKEN_ARG \ - --log-group-name managed-flink-poc-log-group-${AWS_ACCOUNT_ID} \ - --log-stream-name managed-flink-poc-log-stream-${AWS_ACCOUNT_ID} \ - >$CWLOG_FILE + --start-from-head \ + $NEXT_TOKEN_ARG \ + --log-group-name flink-cp-demo-log-group \ + --log-stream-name flink-cp-demo-log-stream \ + >$CWLOG_FILE NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) echo $NEXT_TOKEN >$CWLOGS_DIR/next.token diff --git a/aws-crossplane/poc-get-events.sh b/aws-crossplane/poc-get-events.sh deleted file mode 100755 index 1baa5c0..0000000 --- a/aws-crossplane/poc-get-events.sh +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/bash - -# Get the events sent to the egress stream -stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-demo-egress) - -shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) -shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) -for encoded_data in $(aws kinesis get-records --shard-iterator $shard_iterator | jq -crM .Records[].Data); do - echo $encoded_data | base64 -d | jq . -done diff --git a/aws-crossplane/resources/flink/README.md b/aws-crossplane/resources/flink/README.md deleted file mode 100644 index 5573cd7..0000000 --- a/aws-crossplane/resources/flink/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Two compositions are provided. The first, `flink-basic-comp.yaml` creates a Managed Flink instance and associated -CloudWatch log group and stream. This composition results in a Managed Flink instance in the 'Ready' state. The second, -`flink-lambda-comp.yaml` goes further to also create a lambda that observes the Managed Flink instance and automatically -transitions it to the running state. 
\ No newline at end of file diff --git a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml b/aws-crossplane/resources/flink/flink-basic-example-claim.yaml deleted file mode 100644 index 84d4e09..0000000 --- a/aws-crossplane/resources/flink/flink-basic-example-claim.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: kellinwood.com/v1alpha1 -kind: ManagedFlink -metadata: - name: flink-demo - namespace: default -spec: - resourceConfig: - region: us-east-2 - name: flink-demo - codeBucket: flink-demo-bucket - codeFile: my-stateful-functions-embedded-java-3.3.0.jar - runtime: FLINK-1_18 - parallelism: 1 -# startApplication: true - environmentProperties: - - propertyGroup: - - propertyGroupId: StatefunApplicationProperties - propertyMap: - EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress - EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress - AWS_REGION: us-east-2 - compositionSelector: - matchLabels: - appReadyHandler: none # Use the composition that doesn't provision a lambda function to handle the app ready signal - - diff --git a/aws-crossplane/resources/flink/flink-basic-comp.yaml b/aws-crossplane/resources/flink/flink-comp.yaml similarity index 96% rename from aws-crossplane/resources/flink/flink-basic-comp.yaml rename to aws-crossplane/resources/flink/flink-comp.yaml index d390630..3d22a8b 100644 --- a/aws-crossplane/resources/flink/flink-basic-comp.yaml +++ b/aws-crossplane/resources/flink/flink-comp.yaml @@ -47,7 +47,7 @@ spec: AWS_REGION: us-west-1 flinkApplicationConfiguration: - checkpointConfiguration: - - checkpointInterval: 300000 # 5 mins * 60 secs/min * 1000 millis/sec + - checkpointInterval: 60000 # every minute # Update to 5 mins for production checkpointingEnabled: true configurationType: CUSTOM monitoringConfiguration: @@ -150,8 +150,8 @@ spec: { "Effect": "Allow", "Resource": [ - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-ingress", - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-egress" + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress", + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress" ], "Action": [ "kinesis:DescribeStream", @@ -170,7 +170,7 @@ spec: { "Effect": "Allow", "Resource": [ - "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo-app-log-group" + "arn:aws:logs:us-east-2:516535517513:log-group:flink-cp-demo-log-group" ], "Action": [ "logs:DescribeLogGroups", @@ -227,7 +227,7 @@ spec: - type: string string: type: Format - fmt: "%s-app-log-group" + fmt: "%s-log-group" - name: log-stream base: apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 @@ -252,7 +252,7 @@ spec: - type: string string: type: Format - fmt: "%s-app-log-stream" + fmt: "%s-log-stream" - type: FromCompositeFieldPath fromFieldPath: spec.resourceConfig.name toFieldPath: spec.forProvider.name @@ -260,5 +260,5 @@ spec: - type: string string: type: Format - fmt: "%s-app-log-stream" + fmt: "%s-log-stream" \ No newline at end of file diff --git a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml b/aws-crossplane/resources/flink/flink-example-claim.yaml similarity index 89% rename from aws-crossplane/resources/flink/flink-lambda-example-claim.yaml rename to aws-crossplane/resources/flink/flink-example-claim.yaml index b088589..6a09ce0 100644 --- a/aws-crossplane/resources/flink/flink-lambda-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-example-claim.yaml @@ -11,6 +11,7 @@ spec: codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 parallelism: 1 + # startApplication: true 
environmentProperties: - propertyGroup: - propertyGroupId: StatefunApplicationProperties @@ -18,8 +19,3 @@ spec: EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress AWS_REGION: us-east-2 - compositionSelector: - matchLabels: - appReadyHandler: lambda - - diff --git a/aws-crossplane/resources/flink/flink-lambda-comp.yaml b/aws-crossplane/resources/flink/flink-lambda-comp.yaml deleted file mode 100644 index aea4e58..0000000 --- a/aws-crossplane/resources/flink/flink-lambda-comp.yaml +++ /dev/null @@ -1,463 +0,0 @@ ---- -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: flinklambdastart.kellinwood.com - labels: - appReadyHandler: lambda -spec: - compositeTypeRef: - apiVersion: kellinwood.com/v1alpha1 - kind: XManagedFlink - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: managed-flink-application - base: - apiVersion: kinesisanalyticsv2.aws.upbound.io/v1beta1 - kind: Application - metadata: - annotations: - meta.upbound.io/example-id: kinesisanalyticsv2/v1beta1/application - name: example - spec: - forProvider: - applicationConfiguration: - - applicationCodeConfiguration: - - codeContentType: ZIPFILE - codeContent: - - s3ContentLocation: - - fileKey: example-flink-application.jar - bucketArnSelector: - matchLabels: - crossplane.io/claim-name: example-bucket - applicationSnapshotConfiguration: - - snapshotsEnabled: true - environmentProperties: - - propertyGroup: - - propertyGroupId: MyAppProperties - propertyMap: - FOO: bar - AWS_REGION: us-west-1 - flinkApplicationConfiguration: - - checkpointConfiguration: - - checkpointInterval: 300000 # 5 mins * 60 secs/min * 1000 millis/sec - checkpointingEnabled: true - configurationType: CUSTOM - monitoringConfiguration: - - logLevel: INFO - metricsLevel: TASK - configurationType: CUSTOM - parallelismConfiguration: - - autoScalingEnabled: false - parallelism: 2 - parallelismPerKpu: 1 - configurationType: CUSTOM - runConfiguration: - - applicationRestoreConfiguration: - - applicationRestoreType: RESTORE_FROM_LATEST_SNAPSHOT # RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT - # snapshotName: xyz # Specify this when restoreType = RESTORE_FROM_CUSTOM_SNAPSHOT - flinkRunConfiguration: - - allowNonRestoredState: false - applicationMode: STREAMING - cloudwatchLoggingOptions: - - logStreamArnSelector: - matchControllerRef: true - region: us-east-2 - runtimeEnvironment: FLINK-1_18 - serviceExecutionRoleSelector: - matchLabels: - rolePurpose: flink-application - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-application" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.codeFile - toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].fileKey - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.codeBucket - toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].bucketArnSelector.matchLabels['crossplane.io/claim-name'] - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.runtime - toFieldPath: spec.forProvider.runtimeEnvironment - - 
type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.parallelism - toFieldPath: spec.forProvider.applicationConfiguration[0].flinkApplicationConfiguration[0].parallelismConfiguration[0].parallelism - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.environmentProperties - toFieldPath: spec.forProvider.applicationConfiguration[0].environmentProperties - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.region - toFieldPath: spec.forProvider.region - - type: ToCompositeFieldPath - fromFieldPath: status.atProvider.arn - toFieldPath: status.managedFlinkArn - - type: ToCompositeFieldPath - fromFieldPath: status.atProvider.id - toFieldPath: status.managedFlinkName - - name: managed-flink-role - base: - apiVersion: iam.aws.upbound.io/v1beta1 - kind: Role - metadata: - annotations: - meta.upbound.io/example-id: iam/v1beta1/role - labels: - rolePurpose: flink-application - name: example - spec: - forProvider: - assumeRolePolicy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "kinesisanalytics.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - } - managedPolicyArns: - - arn:aws:iam::aws:policy/AmazonKinesisFullAccess - - arn:aws:iam::aws:policy/AmazonS3FullAccess - - arn:aws:iam::aws:policy/CloudWatchFullAccess - inlinePolicy: - - name: kinesis_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": [ - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-ingress", - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-demo-egress" - ], - "Action": [ - "kinesis:DescribeStream", - "kinesis:GetRecords", - "kinesis:GetShardIterator", - "kinesis:ListShards" - ] - } - ] - } - - name: logs_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": [ - "arn:aws:logs:us-east-2:516535517513:log-group:flink-demo-app-log-group" - ], - "Action": [ - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "logs:PutLogEvents" - ] - } - ] - } - - name: metrics_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": "*", - "Action": [ - "cloudwatch:PutMetricData" - ] - } - ] - } - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-app-role" - - name: log-group - base: - apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 - kind: Group - metadata: - annotations: - meta.upbound.io/example-id: cloudwatchlogs/v1beta1/group - name: example - spec: - forProvider: - region: us-east-2 - retentionInDays: 7 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-app-log-group" - - name: log-stream - base: - apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 - kind: Stream - metadata: - annotations: - meta.upbound.io/example-id: cloudwatchlogs/v1beta1/stream - name: example - spec: - forProvider: - logGroupNameSelector: - matchControllerRef: true - name: example - region: us-east-2 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-app-log-stream" - - type: 
FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.name - transforms: - - type: string - string: - type: Format - fmt: "%s-app-log-stream" - - name: starter-lambda - base: - apiVersion: lambda.aws.upbound.io/v1beta1 - kind: Function - metadata: - annotations: - meta.upbound.io/example-id: lambda/v1beta1/function - name: example - spec: - forProvider: - handler: start_flink.lambda_handler - packageType: Zip - region: us-east-2 - roleSelector: - matchLabels: - rolePurpose: starter-lambda - runtime: python3.10 - s3BucketSelector: - matchLabels: - crossplane.io/claim-name: flink-demo-bucket - s3Key: start_flink_py.zip - timeout: 60 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-starter" - - name: starter-lambda-role - base: - apiVersion: iam.aws.upbound.io/v1beta1 - kind: Role - metadata: - annotations: - meta.upbound.io/example-id: iam/v1beta1/role - labels: - rolePurpose: starter-lambda - name: example - spec: - forProvider: - assumeRolePolicy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "lambda.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - } - inlinePolicy: - - name: flink_permissions - policy: |- - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": [ - "arn:aws:kinesisanalytics:us-east-2:516535517513:application/flink-demo-application" - ], - "Action": [ - "kinesisanalytics:DescribeApplication", - "kinesisanalytics:StartApplication" - ] - } - ] - } - managedPolicyArns: - - arn:aws:iam::aws:policy/AmazonKinesisFullAccess - - arn:aws:iam::aws:policy/CloudWatchFullAccess - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-starter-role" - - name: starter-lambda-invoke-perms - base: - apiVersion: lambda.aws.upbound.io/v1beta1 - kind: Permission - metadata: - annotations: - meta.upbound.io/example-id: lambda/v1beta1/permission - name: example-starter-permission - spec: - forProvider: - action: lambda:InvokeFunction - functionNameSelector: - matchControllerRef: true - principal: events.amazonaws.com - region: us-east-2 - sourceArn: arn:aws:events:us-east-2:516535517513:rule/example-eventbridge-rule - statementId: example-starter-permission - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-starter-permission" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.statementId - transforms: - - type: string - string: - type: Format - fmt: "%s-starter-permission" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.sourceArn - transforms: - - type: string - string: - type: Format - fmt: "arn:aws:events:us-east-2:516535517513:rule/%s-eventbridge-rule" - - name: eventbridge-rule - base: - apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 - kind: Rule - metadata: - annotations: - meta.upbound.io/example-id: cloudwatchevents/v1beta2/rule - name: example-eventbridge-rule - spec: - forProvider: - description: Process Managed Flink 
events - eventBusName: default - eventPattern: | - { - "source": ["aws.kinesisanalytics"] - } - region: us-east-2 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-eventbridge-rule" - - name: eventbridge-target - base: - apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 - kind: Target - metadata: - name: example - spec: - forProvider: - arn: example - eventBusName: default - region: us-east-2 - ruleSelector: - matchControllerRef: true - targetId: example - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-eventbridge-target" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.targetId - transforms: - - type: string - string: - type: Format - fmt: "%s-eventbridge-target" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.arn - transforms: - - type: string - string: - type: Format - fmt: "arn:aws:lambda:us-east-2:516535517513:function:%s-starter" diff --git a/aws-crossplane/resources/s3/index.js b/aws-crossplane/resources/s3/index.js deleted file mode 100644 index fc315fb..0000000 --- a/aws-crossplane/resources/s3/index.js +++ /dev/null @@ -1,8 +0,0 @@ - exports.handler = async (event) => { - console.log(event); - const response = { - statusCode: 200, - event: event - }; - return response; - }; diff --git a/aws-crossplane/resources/s3/s3-object-comp.yaml b/aws-crossplane/resources/s3/s3-object-comp.yaml deleted file mode 100644 index 7f9d168..0000000 --- a/aws-crossplane/resources/s3/s3-object-comp.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: s3objects.kellinwood.com -spec: - compositeTypeRef: - apiVersion: kellinwood.com/v1alpha1 - kind: XS3Object - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: s3-object - base: - apiVersion: s3.aws.upbound.io/v1beta1 - kind: Object - metadata: - annotations: - meta.upbound.io/example-id: s3/v1beta1/object - name: placeholder - spec: - forProvider: - region: us-east-2 - key: placeholder - bucketSelector: - matchLabels: - crossplane.io/claim-name: placeholder - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: metadata.name - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.key - toFieldPath: spec.forProvider.key - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.region - toFieldPath: spec.forProvider.region - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.bucket - toFieldPath: spec.forProvider.bucketSelector.matchLabels['crossplane.io/claim-name'] - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.content - toFieldPath: spec.forProvider.content - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.contentBase64 - toFieldPath: spec.forProvider.contentBase64 - - type: ToCompositeFieldPath - fromFieldPath: status.atProvider.arn - toFieldPath: status.objectArn - - type: ToCompositeFieldPath - fromFieldPath: 
status.atProvider.keyy - toFieldPath: status.objectName diff --git a/aws-crossplane/resources/s3/s3-object-example-claim.yaml b/aws-crossplane/resources/s3/s3-object-example-claim.yaml deleted file mode 100644 index b406b83..0000000 --- a/aws-crossplane/resources/s3/s3-object-example-claim.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: kellinwood.com/v1alpha1 -kind: S3Object -metadata: - name: index.zip - namespace: default -spec: - resourceConfig: - region: us-east-2 - bucket: flink-demo-bucket - key: index.zip -# The content of the index.zip is a single file, index.js, containing: -# exports.handler = async (event) => { -# console.log(event); -# const response = { -# statusCode: 200, -# event: event -# }; -# return response; -# }; - contentBase64: UEsDBBQAAAAIACZUdln/4COOcwAAANEAAAAIABwAaW5kZXguanNVVAkAA4fqQGeK6kBndXgLAAEE9gEAAAQUAAAAU1AAgdSKgvyikmK9jMS8lJzUIgVbhcTiyrxkBY3UstS8Ek0FWzuFai4FOEjOzyvOz0nVy8lPh6qwRpMtUShKLS4AMlKBZiFrBYHiksSS0mLn/JRUKwUjAwMdNGmwiVYQCkmqFtmOotSS0qI8uCUwKaAaAFBLAQIeAxQAAAAIACZUdln/4COOcwAAANEAAAAIABgAAAAAAAEAAACkgQAAAABpbmRleC5qc1VUBQADh+pAZ3V4CwABBPYBAAAEFAAAAFBLBQYAAAAAAQABAE4AAAC1AAAAAAA= diff --git a/aws-crossplane/resources/s3/s3-object-xrd.yaml b/aws-crossplane/resources/s3/s3-object-xrd.yaml deleted file mode 100644 index 3314b4f..0000000 --- a/aws-crossplane/resources/s3/s3-object-xrd.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xs3objects.kellinwood.com -spec: - group: kellinwood.com - names: - kind: XS3Object - plural: xs3objects - claimNames: - kind: S3Object - plural: s3objects - versions: - - name: v1alpha1 - served: true - referenceable: true - schema: - openAPIV3Schema: - properties: - spec: - properties: - resourceConfig: - properties: - region: - type: string - bucket: - type: string - key: - type: string - content: - type: string - contentBase64: - type: string - type: object - type: object - status: - properties: - objectName: - type: string - objectArn: - type: string - type: object - type: object diff --git a/aws-crossplane/start-flink-lambda/.gitignore b/aws-crossplane/start-flink-lambda/.gitignore deleted file mode 100644 index 5815224..0000000 --- a/aws-crossplane/start-flink-lambda/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -package -venv.demo -.DS_Store -*.zip diff --git a/aws-crossplane/start-flink-lambda/README.md b/aws-crossplane/start-flink-lambda/README.md deleted file mode 100644 index 1b0cd99..0000000 --- a/aws-crossplane/start-flink-lambda/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Instructions to build the lambda handler Zip file -These instructions result in the creation of `start_flink_py.zip` -``` -cd aws-crossplane/start-flink-lambda/ -mkdir package -python3 -m venv venv.demo -source venv.demo/bin/activate -pip install --target ./package boto3 -./build_start_flink_py_zip.sh -``` diff --git a/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh b/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh deleted file mode 100755 index 1ca80f2..0000000 --- a/aws-crossplane/start-flink-lambda/build_start_flink_py_zip.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -set -e - -cd $(dirname $0) -rm -rf start_flink_py.zip -cd package -zip -r ../start_flink_py.zip . -cd .. 
-zip start_flink_py.zip start_flink.py - diff --git a/aws-crossplane/start-flink-lambda/start_flink.py b/aws-crossplane/start-flink-lambda/start_flink.py deleted file mode 100644 index c80a3c9..0000000 --- a/aws-crossplane/start-flink-lambda/start_flink.py +++ /dev/null @@ -1,48 +0,0 @@ -import logging -import boto3 - -logger = logging.getLogger() -logger.setLevel(logging.INFO) - -def lambda_handler(event, context): - - if 'detail' in event: - event_detail = event['detail'] - else: - logger.info('Ignoring - missing .detail in : {}'.format(event)) - return - - event_name = 'UnknownEventName' - if 'eventName' in event_detail: - event_name = event_detail['eventName'] - - logger.info('Incoming event {}: {}'.format(event_name, event)) - - # Ignore events other than Create or Update, - if event_name not in ['CreateApplication', 'UpdateApplication']: - logger.info('Ignoring - eventName={}'.format(event_name)) - return - - try: - region = event['region'] - application_name = event_detail['responseElements']['applicationDetail']['applicationName'] - - # kinesisanalyticsv2 API reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesisanalyticsv2.html - client_kda = boto3.client('kinesisanalyticsv2', region_name=region) - - describe_response = client_kda.describe_application(ApplicationName=application_name) - logger.info(f'describe_application response: {describe_response}') - - # get application status. - application_status = describe_response['ApplicationDetail']['ApplicationStatus'] - - # an application can be started from 'READY' status only. - if application_status != 'READY': - logger.info('No-op for Application {} because ApplicationStatus {} is filtered'.format(application_name, application_status)) - return - - # this call doesn't wait for an application to transfer to 'RUNNING' state. - client_kda.start_application(ApplicationName=application_name) - logger.info('Started Application: {}'.format(application_name)) - except Exception as err: - logger.error(err) diff --git a/aws-terraform/demo-send-events.sh b/aws-terraform/demo-send-events.sh new file mode 100755 index 0000000..d9da9c6 --- /dev/null +++ b/aws-terraform/demo-send-events.sh @@ -0,0 +1,21 @@ +#! /bin/bash + +set -e +if [ $(uname) = "Darwin" ]; then + MD5SUM=md5 +else + MD5SUM=md5sum +fi + +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-tf-demo-ingress) +if [ -z "$stream_name" ]; then + echo "Stream not found" + exit 1 +fi +grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do + partkey=$(echo $line | $MD5SUM | awk '{print $1}') + data=$(echo $line | base64) + cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data" + echo $cmd + eval $cmd +done diff --git a/aws-terraform/demo-tail-egress.sh b/aws-terraform/demo-tail-egress.sh new file mode 100755 index 0000000..7749263 --- /dev/null +++ b/aws-terraform/demo-tail-egress.sh @@ -0,0 +1,24 @@ +#! 
/bin/bash + +set -e + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-tf-demo-egress) + +get_records_response=$(mktemp) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +while [ "true" ]; do + aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response + shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator) + record_count=0 + for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do + record_count=$(expr $record_count + 1) + echo $encoded_data | base64 -d | jq . + done + if [ $record_count -eq 0 ]; then + sleep 2 + fi +done + diff --git a/aws-terraform/demo-tail-logs.sh b/aws-terraform/demo-tail-logs.sh new file mode 100755 index 0000000..bc24d7b --- /dev/null +++ b/aws-terraform/demo-tail-logs.sh @@ -0,0 +1,40 @@ +#! /bin/bash + +set -e + +cd $(dirname $0) + +NEXT_TOKEN_ARG= + +CWLOGS_DIR=.cwlogs +mkdir -p $CWLOGS_DIR + +ITERATION=1 + +if [ -f $CWLOGS_DIR/next.token ]; then + NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)" +fi + +while true; do + CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json + aws logs get-log-events \ + --start-from-head \ + $NEXT_TOKEN_ARG \ + --log-group-name flink-tf-demo-log-group \ + --log-stream-name flink-tf-demo-log-stream \ + >$CWLOG_FILE + + NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) + echo $NEXT_TOKEN >$CWLOGS_DIR/next.token + NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN" + EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length') + + if [[ $EVENT_COUNT == 0 ]]; then + sleep 2 + rm $CWLOG_FILE + else + cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log + fi + + ITERATION=$(echo "1 + $ITERATION" | bc) +done diff --git a/aws-terraform/main.tf b/aws-terraform/main.tf index 440e405..9c483e8 100644 --- a/aws-terraform/main.tf +++ b/aws-terraform/main.tf @@ -9,7 +9,7 @@ data "aws_region" "current" {} resource "aws_s3_bucket" "flink_demo_bucket" { # Bucket names must be globally unique, so I'm appending the account ID to workaround BucketAlreadyExists - bucket = "flink-demo-bucket-${data.aws_caller_identity.current.account_id}" + bucket = "flink-tf-demo-bucket-${data.aws_caller_identity.current.account_id}" } resource "aws_s3_bucket_ownership_controls" "flink_demo_bucket_ownership_controls" { @@ -27,7 +27,7 @@ resource "aws_s3_bucket_acl" "flink_demo_bucket_acl" { } resource "aws_kinesis_stream" "flink_demo_ingress" { - name = "flink-demo-ingress" + name = "flink-tf-demo-ingress" shard_count = 1 retention_period = 24 # Retention period in hours @@ -42,7 +42,7 @@ resource "aws_kinesis_stream" "flink_demo_ingress" { } resource "aws_kinesis_stream" "flink_demo_egress" { - name = "flink-demo-egress" + name = "flink-tf-demo-egress" shard_count = 1 retention_period = 24 # Retention period in hours @@ -148,12 +148,12 @@ resource "aws_iam_role_policy" "flink_app_logs_policy" { } resource "aws_cloudwatch_log_group" "flink_demo_log_group" { - name = "flink-demo-log-group" + name = "flink-tf-demo-log-group" retention_in_days = 14 } resource "aws_cloudwatch_log_stream" "flink_demo_log_stream" { - name = "flink-demo-log-stream" + name = "flink-tf-demo-log-stream" log_group_name = 
aws_cloudwatch_log_group.flink_demo_log_group.name } @@ -174,7 +174,7 @@ resource "aws_iam_role_policy" "flink_app_metrics_policy" { # Reference: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesisanalyticsv2_application resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { - name = "flink_demo_tf" + name = "flink-tf-demo-application" runtime_environment = "FLINK-1_18" service_execution_role = aws_iam_role.flink_application_role.arn application_mode = "STREAMING" @@ -185,7 +185,7 @@ resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { code_content { s3_content_location { bucket_arn = aws_s3_bucket.flink_demo_bucket.arn - file_key = "my-stateful-functions-embedded-java-3.3.0.jar" + file_key = "my-stateful-functions-embedded-java-3.3.0.jar.1" } } code_content_type = "ZIPFILE" @@ -210,7 +210,7 @@ resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { flink_application_configuration { checkpoint_configuration { configuration_type = "CUSTOM" - checkpoint_interval = 300000 # 5 mins * 60 secs/min * 1000 millis/sec + checkpoint_interval = 60000 # Every minute # Increase this to 300000 in production (every 5 minutes) checkpointing_enabled = true } monitoring_configuration { @@ -239,4 +239,8 @@ resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { cloudwatch_logging_options { log_stream_arn = aws_cloudwatch_log_stream.flink_demo_log_stream.arn } + + tags = { + ProvisionedBy = "Terraform" + } } diff --git a/docker-compose.yml b/docker-compose.yml index 608de74..67375cc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -57,6 +57,7 @@ services: -D "state.savepoints.dir=file:///savepoints" -D "state.checkpoints.dir=file:///checkpoints" --job-classname org.apache.flink.statefun.flink.core.StatefulFunctionsJob +# --fromSavepoint=file:///savepoints/savepoint-xxxx-yyyyyy entrypoint: /entrypoint.sh expose: - "6123" diff --git a/src/main/java/com/example/stateful_functions/function/AbstractStatefulFunction.java b/src/main/java/com/example/stateful_functions/function/AbstractStatefulFunction.java index 87a61f0..825a778 100644 --- a/src/main/java/com/example/stateful_functions/function/AbstractStatefulFunction.java +++ b/src/main/java/com/example/stateful_functions/function/AbstractStatefulFunction.java @@ -30,7 +30,7 @@ public abstract class AbstractStatefulFunction implements StatefulFunction { public abstract FunctionType getFunctionType(); @Autowired - ExampleCloudEventJsonFormat cloudEventJsonFormat; + protected ExampleCloudEventJsonFormat cloudEventJsonFormat; @Autowired protected ExampleCloudEventDataAccess cloudEventDataAccess; diff --git a/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java b/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java index b4ba427..a4a7850 100644 --- a/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java +++ b/src/main/java/com/example/stateful_functions/function/cart/CartStatefulFunction.java @@ -79,7 +79,14 @@ private void productSubscription(Context context, String cartId, String productI private void handleCartProductEvent(Context context, CloudEvent event) { CartProductEventDetails cartProduct = cloudEventDataAccess.toCartProductEventDetails(event); - CartStateDetails cartState = state.getOrDefault(() -> new CartStateDetails(cartProduct.getCartId())); + CartStateDetails cartState = state.get(); + if (cartState == null) { + LOG.info("Creating state for {}", context.self().id()); + cartState = 
new CartStateDetails(cartProduct.getCartId()); + } + else { + LOG.info("Updating state for {}", context.self().id()); + } CartItemStateDetails cartItem = cartState.getItems().get(cartProduct.getProductId()); @@ -87,7 +94,6 @@ private void handleCartProductEvent(Context context, CloudEvent event) { final int resultingItemQuantity; if (cartItem == null) { - startingItemQuantity = 0; resultingItemQuantity = cartProduct.getQuantity(); } else { @@ -130,9 +136,13 @@ private void handleCartProductEvent(Context context, CloudEvent event) { private void handleProductEvent(Context context, CloudEvent event) { CartStateDetails cartState = state.get(); if (cartState == null) { + LOG.info("Nonexistent state for {}", context.self().id()); // Nothing to do return; } + else { + LOG.info("Updating state for {}", context.self().id()); + } ProductEventDetails productDetails = cloudEventDataAccess.toProductEventDetails(event); CartItemStateDetails cartItem = cartState.getItems().get(productDetails.getId()); @@ -172,6 +182,7 @@ private void egressCartStatus(Context context, CartStateDetails cartState) { .withTime(OffsetDateTime.now(ZoneOffset.UTC)) .build(); + LOG.info("Publishing cart status event to egress: {}", cloudEventJsonFormat.serialize(cartStatusEvent)); egressEvent(context, cartStatusEvent, cartState.getId()); } } \ No newline at end of file diff --git a/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java b/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java index 8596550..7c0a947 100644 --- a/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java +++ b/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java @@ -87,6 +87,12 @@ public void handleEvent(Context context, CloudEvent event) { } private void handleProductEvent(Context context, CloudEvent event) { + if (state.get() == null) { + LOG.info("Creating state for {}", context.self().id()); + } + else { + LOG.info("Updating state for {}", context.self().id()); + } state.set(fromProductEventDetails(cloudEventDataAccess.toProductEventDetails(event))); notifySubscribers(context, event); } From 0e485802e5f0d3ab28d4a2020d1c03abddcaa87f Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 2 Jan 2025 13:34:07 -1000 Subject: [PATCH 18/28] Fix path to update_credentials.sh --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index aac75c2..a0908f1 100644 --- a/README.md +++ b/README.md @@ -292,7 +292,7 @@ Login to AWS Identity Center, and copy the AWS credential environment variables Paste and execute the AWS environment variable commands, then run this script: ``` -./local/aws/update_credentials.sh +./cloud/aws/update_credentials.sh ``` Launch the local IDP using idpbuilder (https://github.com/cnoe-io/idpbuilder) From b02a69349b7a2d6d52c71e5a945f7423d53b30ec Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Fri, 3 Jan 2025 09:43:48 -1000 Subject: [PATCH 19/28] More README fixes --- README.md | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index a0908f1..99f303d 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,9 @@ The purpose of this project is two-fold. [Caliper](https://www.imsglobal.org/spec/caliper/v1p2) events. 
* This project egresses results as events to a separate stream, whereas at Imagine Learning we mostly send our results directly to OpenSearch and occasionally write events back to the ingress stream. - 2. It will serve as the basis for an evaluation of Stateful Functions running on + 2. It serves as the basis for an evaluation of Stateful Functions running on [AWS Managed Flink](https://docs.aws.amazon.com/managed-flink/). At the time of - this writing Imagine Learning runs stateful functions on self-managed Kubernetes clusters, but we are looking to + this writing Imagine Learning runs Flink Stateful Functions on self-managed Kubernetes clusters, but we are looking to see if AWS Managed Flink is a viable alternative. @@ -23,7 +23,8 @@ This project demonstrates stateful functions under test in various ways: * run-time execution in standalone job mode via docker compose -The project implements embedded functions (functions that execute in the Flink taskmanagers). Remote functions are future work. +The project implements embedded functions (functions that execute in the Flink taskmanagers). Remote functions are +future work. This is an opinionated project. It uses... * Spring Framework for dependency injection @@ -38,7 +39,7 @@ This is an opinionated project. It uses... Each forwarder is small piece of code that routes one or more specific event types to a stateful function. To start routing a new event type, just implement another Forwarder. -## What this Stateful Functions appication does +## What this Stateful Functions application does Example events and functions are provided which demonstrate notifying a shopping cart service of product price and availability changes for items in users' carts. The project assumes the existence of upstream microservices that send Product events (name,price,availability) and @@ -98,7 +99,7 @@ building and installing Apache Flink Stateful Functions compatible with Flink 1. ./mvnw test ``` -## Running the project via Docker Compose +## Running the project locally via Docker Compose Follow the instructions below to run the project via Docker Compose. Note that Kinesis support is provided by a [localstack](https://www.localstack.cloud/) container. @@ -138,7 +139,7 @@ docker compose --profile all down ### Version compatibility between AWS Managed Flink and Stateful Functions The latest release of Apache Flink Stateful Functions is 3.3, but its compiled and built -to run with Flink 1.16.2. AWS Managed Flink supports Flink versions 1.15 and 1.18. So the first +to run with Flink 1.16.2. AWS Managed Flink currently supports Flink versions 1.15 and 1.18. So the first step towards running via AWS Managed Flink is to create a version of the stateful functions library compatible with Flink 1.18. The required changes are provided here: https://github.com/kellinwood/flink-statefun/pull/1/files. @@ -221,7 +222,7 @@ Terraform yet, so keep trying until it succeeds. ```shell export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \ - s3://flink-demo-bucket-${AWS_ACCOUNT_ID}/ + s3://flink-tf-demo-bucket-${AWS_ACCOUNT_ID}/ ``` Wait for the `terraform apply` command to complete. @@ -247,7 +248,7 @@ wait for new entries to arrive and display them too. 
``` #### Cleanup -Cleanup by manually deleting the jar file from the S3 bucket, `flink-demo-bucket-${AWS_ACCOUNT_ID}`, and the Kinesis +Cleanup by manually deleting the jar file from the S3 bucket, `flink-tf-demo-bucket-${AWS_ACCOUNT_ID}`, and the Kinesis stream `flink-tf-demo-ingress`. Run the `terraform destroy` command. Note that the manual deletions are required since Terraform can't delete a non-empty bucket, and can't delete the ingress stream since Flink adds a fanout consumer to the stream which will block the deletion attempted by Terraform. @@ -271,10 +272,10 @@ terraform destroy # When prompted, enter 'yes' #### Introduction This demo of provisioning via Crossplane is nowhere near production quality. It merely demonstrates that it is possible -to provision and run an AWS Managed Flink application via crossplane. Many tasks normally performed via CI/CD must be -completed manually as described below. The crossplane compositions currently use `function-patch-and-transform` instead -of a custom composition function, and because of that, many things in the compositions remain hard-coded (AWS account -number, region, ARNs in IAM roles, etc). +to provision and run an AWS Managed Flink application via Crossplane. Many tasks normally performed via CI/CD must be +completed manually as described below. The compositions currently use `function-patch-and-transform` instead of a custom +composition function, and many things in the compositions remain hard-coded (AWS account number, region, ARNs in IAM +roles, etc). @@ -338,7 +339,7 @@ The output of `kubectl get managed` will reveal the actual S3 bucket name under Return to AWS Identity Center and launch the web console for the account. -Visit the S3 services page. Find the S3 bucket (flink-demo-bucket-*) and upload the following file to the bucket +Visit the S3 services page. Find the S3 bucket (flink-cp-demo-bucket-*) and upload the following file to the bucket - `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code) Alternatively, use the AWS CLI to upload the file... 
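A minimal sketch of that CLI upload, assuming the AWS credential environment variables copied from AWS Identity Center are still active in the shell. The bucket name suffix is generated by Crossplane, so the placeholder below must be replaced with the actual `flink-cp-demo-bucket-*` name reported by `kubectl get managed`:

```shell
# Hypothetical bucket name; substitute the actual flink-cp-demo-bucket-* name
# shown by `kubectl get managed`.
aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \
  s3://flink-cp-demo-bucket-<generated-suffix>/
```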
From 78d8e62a2993f861313c86f21f60c138dc6551e6 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Fri, 3 Jan 2025 11:07:24 -1000 Subject: [PATCH 20/28] Remove unused providers --- .../cloud/aws/manifests/aws-services.yaml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/aws-crossplane/cloud/aws/manifests/aws-services.yaml b/aws-crossplane/cloud/aws/manifests/aws-services.yaml index 14107af..1c0ce13 100644 --- a/aws-crossplane/cloud/aws/manifests/aws-services.yaml +++ b/aws-crossplane/cloud/aws/manifests/aws-services.yaml @@ -43,22 +43,3 @@ spec: package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.17.0 controllerConfigRef: name: aws-config ---- -apiVersion: pkg.crossplane.io/v1 -kind: Provider -metadata: - name: provider-aws-cloudwatchevents -spec: - package: xpkg.upbound.io/upbound/provider-aws-cloudwatchevents:v1.17.0 - controllerConfigRef: - name: aws-config ---- -apiVersion: pkg.crossplane.io/v1 -kind: Provider -metadata: - name: provider-aws-lambda - namespace: crossplane-system -spec: - package: xpkg.upbound.io/upbound/provider-aws-lambda:v1.17.0 - controllerConfigRef: - name: aws-config \ No newline at end of file From 58228639e22c870495886a6fc8d9b2fbddee7a65 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 9 Jan 2025 09:09:28 -1000 Subject: [PATCH 21/28] Rename ./cloud back to ./local, composition function v0.1 --- README.md | 4 +- aws-crossplane/claims/demo-setup-claims.yaml | 6 +- .../claims/managed-flink-claim.yaml | 4 +- .../aws/cert-creation/job.yaml | 0 .../{cloud => local}/aws/crossplane.yaml | 0 .../aws/manifests/aws-services.yaml | 0 .../{cloud => local}/aws/manifests/core.yaml | 0 .../aws/manifests/credentials.yaml | 0 .../patch-and-transform-function.yaml | 0 .../aws/manifests/provider-config.yaml | 0 .../aws/manifests/secret.yaml | 0 .../{cloud => local}/aws/providers.yaml | 0 .../aws/update_credentials.sh | 0 aws-crossplane/local/configure-xfn.sh | 13 + .../localstack/cert-creation/job.yaml | 0 .../localstack/configs/function.yaml | 0 .../localstack/configs/local-secret.yaml | 0 .../configs/provider-config-localstack.yaml | 0 .../localstack/configs/providers.yaml | 0 .../localstack/configs/services.yaml | 0 .../localstack/crossplane-configs.yaml | 0 .../localstack/crossplane.yaml | 0 .../localstack/localstack.yaml | 0 .../localstack/port-forward-idp-localstack.sh | 0 .../resources/flink/flink-comp.yaml | 4 +- .../resources/flink/flink-example-claim.yaml | 2 +- aws-crossplane/resources/flink/flink-xrd.yaml | 4 +- .../kinesis/kinesis-stream-comp.yaml | 4 +- .../kinesis/kinesis-stream-example-claim.yaml | 2 +- .../resources/kinesis/kinesis-stream-xrd.yaml | 4 +- .../resources/s3/s3-bucket-comp.yaml | 4 +- .../resources/s3/s3-bucket-example-claim.yaml | 2 +- .../resources/s3/s3-bucket-xrd.yaml | 4 +- aws-crossplane/xfn/.gitignore | 22 ++ aws-crossplane/xfn/.golangci.yml | 208 ++++++++++++ aws-crossplane/xfn/Dockerfile | 48 +++ aws-crossplane/xfn/LICENSE | 201 +++++++++++ aws-crossplane/xfn/NOTES.txt | 9 + aws-crossplane/xfn/README.md | 44 +++ aws-crossplane/xfn/example/README.md | 25 ++ aws-crossplane/xfn/example/composition.yaml | 16 + aws-crossplane/xfn/example/functions.yaml | 11 + aws-crossplane/xfn/example/xr.yaml | 22 ++ aws-crossplane/xfn/fn.go | 221 +++++++++++++ aws-crossplane/xfn/fn_test.go | 117 +++++++ aws-crossplane/xfn/go.mod | 76 +++++ aws-crossplane/xfn/go.sum | 311 ++++++++++++++++++ aws-crossplane/xfn/init.sh | 21 ++ aws-crossplane/xfn/input/generate.go | 15 + aws-crossplane/xfn/input/v1beta1/input.go | 24 ++ 
.../input/v1beta1/zz_generated.deepcopy.go | 34 ++ aws-crossplane/xfn/main.go | 38 +++ aws-crossplane/xfn/package/crossplane.yaml | 6 + .../template.fn.crossplane.io_inputs.yaml | 43 +++ aws-crossplane/xfn/renovate.json | 20 ++ 55 files changed, 1567 insertions(+), 22 deletions(-) rename aws-crossplane/{cloud => local}/aws/cert-creation/job.yaml (100%) rename aws-crossplane/{cloud => local}/aws/crossplane.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/aws-services.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/core.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/credentials.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/patch-and-transform-function.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/provider-config.yaml (100%) rename aws-crossplane/{cloud => local}/aws/manifests/secret.yaml (100%) rename aws-crossplane/{cloud => local}/aws/providers.yaml (100%) rename aws-crossplane/{cloud => local}/aws/update_credentials.sh (100%) create mode 100644 aws-crossplane/local/configure-xfn.sh rename aws-crossplane/{cloud => local}/localstack/cert-creation/job.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/configs/function.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/configs/local-secret.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/configs/provider-config-localstack.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/configs/providers.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/configs/services.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/crossplane-configs.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/crossplane.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/localstack.yaml (100%) rename aws-crossplane/{cloud => local}/localstack/port-forward-idp-localstack.sh (100%) create mode 100644 aws-crossplane/xfn/.gitignore create mode 100644 aws-crossplane/xfn/.golangci.yml create mode 100644 aws-crossplane/xfn/Dockerfile create mode 100644 aws-crossplane/xfn/LICENSE create mode 100644 aws-crossplane/xfn/NOTES.txt create mode 100644 aws-crossplane/xfn/README.md create mode 100644 aws-crossplane/xfn/example/README.md create mode 100644 aws-crossplane/xfn/example/composition.yaml create mode 100644 aws-crossplane/xfn/example/functions.yaml create mode 100644 aws-crossplane/xfn/example/xr.yaml create mode 100644 aws-crossplane/xfn/fn.go create mode 100644 aws-crossplane/xfn/fn_test.go create mode 100644 aws-crossplane/xfn/go.mod create mode 100644 aws-crossplane/xfn/go.sum create mode 100755 aws-crossplane/xfn/init.sh create mode 100644 aws-crossplane/xfn/input/generate.go create mode 100644 aws-crossplane/xfn/input/v1beta1/input.go create mode 100644 aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go create mode 100644 aws-crossplane/xfn/main.go create mode 100644 aws-crossplane/xfn/package/crossplane.yaml create mode 100644 aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml create mode 100644 aws-crossplane/xfn/renovate.json diff --git a/README.md b/README.md index 99f303d..c9de940 100644 --- a/README.md +++ b/README.md @@ -293,13 +293,13 @@ Login to AWS Identity Center, and copy the AWS credential environment variables Paste and execute the AWS environment variable commands, then run this script: ``` -./cloud/aws/update_credentials.sh +./local/aws/update_credentials.sh ``` Launch the local IDP using idpbuilder (https://github.com/cnoe-io/idpbuilder) ``` -idpbuilder 
create -p ./cloud/aws +idpbuilder create -p ./local/aws ``` The `idpbuilder create` command takes a few minutes to complete, and even then it will take more time for crossplane to diff --git a/aws-crossplane/claims/demo-setup-claims.yaml b/aws-crossplane/claims/demo-setup-claims.yaml index 85ece87..7b9e797 100644 --- a/aws-crossplane/claims/demo-setup-claims.yaml +++ b/aws-crossplane/claims/demo-setup-claims.yaml @@ -1,5 +1,5 @@ --- -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: S3Bucket metadata: name: flink-cp-demo-bucket @@ -8,7 +8,7 @@ spec: resourceConfig: region: us-east-2 --- -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: KinesisStream metadata: name: flink-cp-demo-ingress @@ -24,7 +24,7 @@ spec: createdBy: ken.ellinwood@imaginelearning.com purpose: statefun-ingress --- -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: KinesisStream metadata: name: flink-cp-demo-egress diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml index ae4fad1..dd43cc0 100644 --- a/aws-crossplane/claims/managed-flink-claim.yaml +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -1,4 +1,4 @@ -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: ManagedFlink metadata: name: flink-cp-demo @@ -10,7 +10,7 @@ spec: codeBucket: flink-cp-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar runtime: FLINK-1_18 - # startApplication: true + startApplication: true parallelism: 1 environmentProperties: - propertyGroup: diff --git a/aws-crossplane/cloud/aws/cert-creation/job.yaml b/aws-crossplane/local/aws/cert-creation/job.yaml similarity index 100% rename from aws-crossplane/cloud/aws/cert-creation/job.yaml rename to aws-crossplane/local/aws/cert-creation/job.yaml diff --git a/aws-crossplane/cloud/aws/crossplane.yaml b/aws-crossplane/local/aws/crossplane.yaml similarity index 100% rename from aws-crossplane/cloud/aws/crossplane.yaml rename to aws-crossplane/local/aws/crossplane.yaml diff --git a/aws-crossplane/cloud/aws/manifests/aws-services.yaml b/aws-crossplane/local/aws/manifests/aws-services.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/aws-services.yaml rename to aws-crossplane/local/aws/manifests/aws-services.yaml diff --git a/aws-crossplane/cloud/aws/manifests/core.yaml b/aws-crossplane/local/aws/manifests/core.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/core.yaml rename to aws-crossplane/local/aws/manifests/core.yaml diff --git a/aws-crossplane/cloud/aws/manifests/credentials.yaml b/aws-crossplane/local/aws/manifests/credentials.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/credentials.yaml rename to aws-crossplane/local/aws/manifests/credentials.yaml diff --git a/aws-crossplane/cloud/aws/manifests/patch-and-transform-function.yaml b/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/patch-and-transform-function.yaml rename to aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml diff --git a/aws-crossplane/cloud/aws/manifests/provider-config.yaml b/aws-crossplane/local/aws/manifests/provider-config.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/provider-config.yaml rename to aws-crossplane/local/aws/manifests/provider-config.yaml diff --git a/aws-crossplane/cloud/aws/manifests/secret.yaml 
b/aws-crossplane/local/aws/manifests/secret.yaml similarity index 100% rename from aws-crossplane/cloud/aws/manifests/secret.yaml rename to aws-crossplane/local/aws/manifests/secret.yaml diff --git a/aws-crossplane/cloud/aws/providers.yaml b/aws-crossplane/local/aws/providers.yaml similarity index 100% rename from aws-crossplane/cloud/aws/providers.yaml rename to aws-crossplane/local/aws/providers.yaml diff --git a/aws-crossplane/cloud/aws/update_credentials.sh b/aws-crossplane/local/aws/update_credentials.sh similarity index 100% rename from aws-crossplane/cloud/aws/update_credentials.sh rename to aws-crossplane/local/aws/update_credentials.sh diff --git a/aws-crossplane/local/configure-xfn.sh b/aws-crossplane/local/configure-xfn.sh new file mode 100644 index 0000000..deba414 --- /dev/null +++ b/aws-crossplane/local/configure-xfn.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Build the Docker image +docker build -t function-managed-flink . + +# Tag the image with the Gitea server URL +docker tag function-managed-flink gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + +# Log in to the Gitea server +idpbuilder get secrets -p gitea -o json | jq -r '.[0].data.password' | docker login -u giteaAdmin --password-stdin gitea.cnoe.localtest.me:8443 + +# Push the image to the Gitea server +docker push gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 diff --git a/aws-crossplane/cloud/localstack/cert-creation/job.yaml b/aws-crossplane/local/localstack/cert-creation/job.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/cert-creation/job.yaml rename to aws-crossplane/local/localstack/cert-creation/job.yaml diff --git a/aws-crossplane/cloud/localstack/configs/function.yaml b/aws-crossplane/local/localstack/configs/function.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/configs/function.yaml rename to aws-crossplane/local/localstack/configs/function.yaml diff --git a/aws-crossplane/cloud/localstack/configs/local-secret.yaml b/aws-crossplane/local/localstack/configs/local-secret.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/configs/local-secret.yaml rename to aws-crossplane/local/localstack/configs/local-secret.yaml diff --git a/aws-crossplane/cloud/localstack/configs/provider-config-localstack.yaml b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/configs/provider-config-localstack.yaml rename to aws-crossplane/local/localstack/configs/provider-config-localstack.yaml diff --git a/aws-crossplane/cloud/localstack/configs/providers.yaml b/aws-crossplane/local/localstack/configs/providers.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/configs/providers.yaml rename to aws-crossplane/local/localstack/configs/providers.yaml diff --git a/aws-crossplane/cloud/localstack/configs/services.yaml b/aws-crossplane/local/localstack/configs/services.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/configs/services.yaml rename to aws-crossplane/local/localstack/configs/services.yaml diff --git a/aws-crossplane/cloud/localstack/crossplane-configs.yaml b/aws-crossplane/local/localstack/crossplane-configs.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/crossplane-configs.yaml rename to aws-crossplane/local/localstack/crossplane-configs.yaml diff --git a/aws-crossplane/cloud/localstack/crossplane.yaml b/aws-crossplane/local/localstack/crossplane.yaml similarity index 100% rename 
from aws-crossplane/cloud/localstack/crossplane.yaml rename to aws-crossplane/local/localstack/crossplane.yaml diff --git a/aws-crossplane/cloud/localstack/localstack.yaml b/aws-crossplane/local/localstack/localstack.yaml similarity index 100% rename from aws-crossplane/cloud/localstack/localstack.yaml rename to aws-crossplane/local/localstack/localstack.yaml diff --git a/aws-crossplane/cloud/localstack/port-forward-idp-localstack.sh b/aws-crossplane/local/localstack/port-forward-idp-localstack.sh similarity index 100% rename from aws-crossplane/cloud/localstack/port-forward-idp-localstack.sh rename to aws-crossplane/local/localstack/port-forward-idp-localstack.sh diff --git a/aws-crossplane/resources/flink/flink-comp.yaml b/aws-crossplane/resources/flink/flink-comp.yaml index 3d22a8b..54d51d4 100644 --- a/aws-crossplane/resources/flink/flink-comp.yaml +++ b/aws-crossplane/resources/flink/flink-comp.yaml @@ -2,12 +2,12 @@ apiVersion: apiextensions.crossplane.io/v1 kind: Composition metadata: - name: flinkbasic.kellinwood.com + name: flinkbasic.example.com labels: appReadyHandler: none spec: compositeTypeRef: - apiVersion: kellinwood.com/v1alpha1 + apiVersion: example.com/v1alpha1 kind: XManagedFlink mode: Pipeline pipeline: diff --git a/aws-crossplane/resources/flink/flink-example-claim.yaml b/aws-crossplane/resources/flink/flink-example-claim.yaml index 6a09ce0..6f7c7ea 100644 --- a/aws-crossplane/resources/flink/flink-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-example-claim.yaml @@ -1,4 +1,4 @@ -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: ManagedFlink metadata: name: flink-demo diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml index f39594d..34bbaa1 100644 --- a/aws-crossplane/resources/flink/flink-xrd.yaml +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -1,9 +1,9 @@ apiVersion: apiextensions.crossplane.io/v1 kind: CompositeResourceDefinition metadata: - name: xmanagedflinks.kellinwood.com + name: xmanagedflinks.example.com spec: - group: kellinwood.com + group: example.com names: kind: XManagedFlink plural: xmanagedflinks diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml index f47a412..ec1d87f 100644 --- a/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml +++ b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml @@ -2,10 +2,10 @@ apiVersion: apiextensions.crossplane.io/v1 kind: Composition metadata: - name: kinesisstreams.kellinwood.com + name: kinesisstreams.example.com spec: compositeTypeRef: - apiVersion: kellinwood.com/v1alpha1 + apiVersion: example.com/v1alpha1 kind: XKinesisStream mode: Pipeline pipeline: diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml index 84c3e26..e38e504 100644 --- a/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml +++ b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml @@ -1,4 +1,4 @@ -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: KinesisStream metadata: name: my-kinesis-stream diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml index 9bbc9af..82b5a29 100644 --- a/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml +++ b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml @@ -1,9 +1,9 @@ apiVersion: 
apiextensions.crossplane.io/v1 kind: CompositeResourceDefinition metadata: - name: xkinesisstreams.kellinwood.com + name: xkinesisstreams.example.com spec: - group: kellinwood.com + group: example.com names: kind: XKinesisStream plural: xkinesisstreams diff --git a/aws-crossplane/resources/s3/s3-bucket-comp.yaml b/aws-crossplane/resources/s3/s3-bucket-comp.yaml index d544186..046e7d6 100644 --- a/aws-crossplane/resources/s3/s3-bucket-comp.yaml +++ b/aws-crossplane/resources/s3/s3-bucket-comp.yaml @@ -2,10 +2,10 @@ apiVersion: apiextensions.crossplane.io/v1 kind: Composition metadata: - name: s3buckets.kellinwood.com + name: s3buckets.example.com spec: compositeTypeRef: - apiVersion: kellinwood.com/v1alpha1 + apiVersion: example.com/v1alpha1 kind: XS3Bucket mode: Pipeline pipeline: diff --git a/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml index 6ff823b..0d4ec6c 100644 --- a/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml +++ b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml @@ -1,4 +1,4 @@ -apiVersion: kellinwood.com/v1alpha1 +apiVersion: example.com/v1alpha1 kind: S3Bucket metadata: name: flink-demo-bucket diff --git a/aws-crossplane/resources/s3/s3-bucket-xrd.yaml b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml index a3cb835..1bee0a7 100644 --- a/aws-crossplane/resources/s3/s3-bucket-xrd.yaml +++ b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml @@ -1,9 +1,9 @@ apiVersion: apiextensions.crossplane.io/v1 kind: CompositeResourceDefinition metadata: - name: xs3buckets.kellinwood.com + name: xs3buckets.example.com spec: - group: kellinwood.com + group: example.com names: kind: XS3Bucket plural: xs3buckets diff --git a/aws-crossplane/xfn/.gitignore b/aws-crossplane/xfn/.gitignore new file mode 100644 index 0000000..0e34e68 --- /dev/null +++ b/aws-crossplane/xfn/.gitignore @@ -0,0 +1,22 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +function-managed-flink.code-workspace diff --git a/aws-crossplane/xfn/.golangci.yml b/aws-crossplane/xfn/.golangci.yml new file mode 100644 index 0000000..1cbfbd0 --- /dev/null +++ b/aws-crossplane/xfn/.golangci.yml @@ -0,0 +1,208 @@ +run: + timeout: 10m + + skip-files: + - "zz_generated\\..+\\.go$" + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + # [deprecated] comma-separated list of pairs of the form pkg:regex + # the regex is used to ignore names within pkg. (default "fmt:.*"). 
+ # see https://github.com/kisielk/errcheck#the-deprecated-method for details + ignore: fmt:.*,io/ioutil:^Read.* + + govet: + # report about shadowed variables + check-shadowing: false + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + + gci: + custom-order: true + sections: + - standard + - default + - prefix(github.com/crossplane) + - prefix(github.com/crossplane-contrib) + - blank + - dot + + gocyclo: + # minimal code complexity to report, 30 by default (but we recommend 10-20) + min-complexity: 10 + + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + + dupl: + # tokens count to trigger issue, 150 by default + threshold: 100 + + goconst: + # minimal length of string constant, 3 by default + min-len: 3 + # minimal occurrences count to trigger, 3 by default + min-occurrences: 5 + + lll: + # tab width in spaces. Default to 1. + tab-width: 1 + + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 30 + + prealloc: + # XXX: we don't recommend using this linter before doing performance profiling. + # For most programs usage of prealloc will be a premature optimization. + + # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # True by default. + simple: true + range-loops: true # Report preallocation suggestions on range loops, true by default + for-loops: false # Report preallocation suggestions on for loops, false by default + + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. + # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - performance + + settings: # settings passed to gocritic + captLocal: # must be valid enabled check name + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 + + nolintlint: + require-explanation: true + require-specific: true + + +linters: + enable: + - megacheck + - govet + - gocyclo + - gocritic + - goconst + - gci + - gofmt # We enable this as well as goimports for its simplify mode. + - prealloc + - revive + - unconvert + - misspell + - nakedret + - nolintlint + + disable: + # These linters are all deprecated as of golangci-lint v1.49.0. We disable + # them explicitly to avoid the linter logging deprecation warnings. + - deadcode + - varcheck + - scopelint + - structcheck + - interfacer + + presets: + - bugs + - unused + fast: false + + +issues: + # Excluding configuration per-path and per-linter + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test(ing)?\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + - scopelint + - unparam + + # Ease some gocritic warnings on test files. + - path: _test\.go + text: "(unnamedResult|exitAfterDefer)" + linters: + - gocritic + + # These are performance optimisations rather than style issues per se. + # They warn when function arguments or range values copy a lot of memory + # rather than using a pointer. 
+ - text: "(hugeParam|rangeValCopy):" + linters: + - gocritic + + # This "TestMain should call os.Exit to set exit code" warning is not clever + # enough to notice that we call a helper method that calls os.Exit. + - text: "SA3000:" + linters: + - staticcheck + + - text: "k8s.io/api/core/v1" + linters: + - goimports + + # This is a "potential hardcoded credentials" warning. It's triggered by + # any variable with 'secret' in the same, and thus hits a lot of false + # positives in Kubernetes land where a Secret is an object type. + - text: "G101:" + linters: + - gosec + - gas + + # This is an 'errors unhandled' warning that duplicates errcheck. + - text: "G104:" + linters: + - gosec + - gas + + # Some k8s dependencies do not have JSON tags on all fields in structs. + - path: k8s.io/ + linters: + - musttag + + # Independently from option `exclude` we use default exclude patterns, + # it can be disabled by this option. To list all + # excluded by default patterns execute `golangci-lint run --help`. + # Default value for this option is true. + exclude-use-default: false + + # Show only new issues: if there are unstaged changes or untracked files, + # only those changes are analyzed, else only changes in HEAD~ are analyzed. + # It's a super-useful option for integration of golangci-lint into existing + # large codebase. It's not practical to fix all existing issues at the moment + # of integration: much better don't allow issues in new code. + # Default is false. + new: false + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/aws-crossplane/xfn/Dockerfile b/aws-crossplane/xfn/Dockerfile new file mode 100644 index 0000000..36b633a --- /dev/null +++ b/aws-crossplane/xfn/Dockerfile @@ -0,0 +1,48 @@ +# syntax=docker/dockerfile:1 + +# We use the latest Go 1.x version unless asked to use something else. +# The GitHub Actions CI job sets this argument for a consistent Go version. +ARG GO_VERSION=1 + +# Setup the base environment. The BUILDPLATFORM is set automatically by Docker. +# The --platform=${BUILDPLATFORM} flag tells Docker to build the function using +# the OS and architecture of the host running the build, not the OS and +# architecture that we're building the function for. +FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS build + +WORKDIR /fn + +# Most functions don't want or need CGo support, so we disable it. +# If CGo support is needed make sure to also change the base image to one that +# includes glibc, like 'distroless/base'. +ENV CGO_ENABLED=0 + +# We run go mod download in a separate step so that we can cache its results. +# This lets us avoid re-downloading modules if we don't need to. The type=target +# mount tells Docker to mount the current directory read-only in the WORKDIR. +# The type=cache mount tells Docker to cache the Go modules cache across builds. +RUN --mount=target=. --mount=type=cache,target=/go/pkg/mod go mod download + +# The TARGETOS and TARGETARCH args are set by docker. We set GOOS and GOARCH to +# these values to ask Go to compile a binary for these architectures. If +# TARGETOS and TARGETOS are different from BUILDPLATFORM, Go will cross compile +# for us (e.g. compile a linux/amd64 binary on a linux/arm64 build machine). +ARG TARGETOS +ARG TARGETARCH + +# Build the function binary. The type=target mount tells Docker to mount the +# current directory read-only in the WORKDIR. 
The type=cache mount tells Docker +# to cache the Go modules cache across builds. +RUN --mount=target=. \ + --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /function . + +# Produce the Function image. We use a very lightweight 'distroless' image that +# does not include any of the build tools used in previous stages. +FROM gcr.io/distroless/static-debian12:nonroot AS image +WORKDIR / +COPY --from=build /function /function +EXPOSE 9443 +USER nonroot:nonroot +ENTRYPOINT ["/function"] diff --git a/aws-crossplane/xfn/LICENSE b/aws-crossplane/xfn/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/aws-crossplane/xfn/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/aws-crossplane/xfn/NOTES.txt b/aws-crossplane/xfn/NOTES.txt new file mode 100644 index 0000000..7af36b2 --- /dev/null +++ b/aws-crossplane/xfn/NOTES.txt @@ -0,0 +1,9 @@ +To get started: + +1. Replace `function-template-go` with your function in `go.mod`, + `package/crossplane.yaml`, and any Go imports. (You can also do this + automatically by running the `./init.sh ` script.) +2. Update `input/v1beta1/` to reflect your desired input (and run `go generate ./...`) +3. Add your logic to `RunFunction` in `fn.go` +4. Add tests for your logic in `fn_test.go` +5. Update `README.md`, to be about your function! diff --git a/aws-crossplane/xfn/README.md b/aws-crossplane/xfn/README.md new file mode 100644 index 0000000..cdf7fb4 --- /dev/null +++ b/aws-crossplane/xfn/README.md @@ -0,0 +1,44 @@ +# function-template-go +[![CI](https://github.com/crossplane/function-template-go/actions/workflows/ci.yml/badge.svg)](https://github.com/crossplane/function-template-go/actions/workflows/ci.yml) + +A template for writing a [composition function][functions] in [Go][go]. + +To learn how to use this template: + +* [Follow the guide to writing a composition function in Go][function guide] +* [Learn about how composition functions work][functions] +* [Read the function-sdk-go package documentation][package docs] + +If you just want to jump in and get started: + +1. Replace `function-template-go` with your function in `go.mod`, + `package/crossplane.yaml`, and any Go imports. (You can also do this + automatically by running the `./init.sh ` script.) +1. Update `input/v1beta1/` to reflect your desired input (and run `go generate ./...`) +1. Add your logic to `RunFunction` in `fn.go` +1. Add tests for your logic in `fn_test.go` +1. Update this file, `README.md`, to be about your function! + +This template uses [Go][go], [Docker][docker], and the [Crossplane CLI][cli] to +build functions. + +```shell +# Run code generation - see input/generate.go +$ go generate ./... + +# Run tests - see fn_test.go +$ go test ./... + +# Build the function's runtime image - see Dockerfile +$ docker build . 
--tag=runtime + +# Build a function package - see package/crossplane.yaml +$ crossplane xpkg build -f package --embed-runtime-image=runtime +``` + +[functions]: https://docs.crossplane.io/latest/concepts/composition-functions +[go]: https://go.dev +[function guide]: https://docs.crossplane.io/knowledge-base/guides/write-a-composition-function-in-go +[package docs]: https://pkg.go.dev/github.com/crossplane/function-sdk-go +[docker]: https://www.docker.com +[cli]: https://docs.crossplane.io/latest/cli diff --git a/aws-crossplane/xfn/example/README.md b/aws-crossplane/xfn/example/README.md new file mode 100644 index 0000000..8b6a134 --- /dev/null +++ b/aws-crossplane/xfn/example/README.md @@ -0,0 +1,25 @@ +# Example manifests + +You can run your function locally and test it using `crossplane beta render` +with these example manifests. + +```shell +# Run the function locally +$ go run . --insecure --debug +``` + +```shell +# Then, in another terminal, call it with these example manifests +$ crossplane beta render xr.yaml composition.yaml functions.yaml -r +--- +apiVersion: example.crossplane.io/v1 +kind: XR +metadata: + name: example-xr +--- +apiVersion: render.crossplane.io/v1beta1 +kind: Result +message: I was run with input "Hello world"! +severity: SEVERITY_NORMAL +step: run-the-template +``` diff --git a/aws-crossplane/xfn/example/composition.yaml b/aws-crossplane/xfn/example/composition.yaml new file mode 100644 index 0000000..ff36c74 --- /dev/null +++ b/aws-crossplane/xfn/example/composition.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: function-managed-flink +spec: + compositeTypeRef: + apiVersion: example.crossplane.io/v1 + kind: XR + mode: Pipeline + pipeline: + - step: run-the-function + functionRef: + name: function-managed-flink + input: + apiVersion: template.fn.crossplane.io/v1beta1 + kind: Input diff --git a/aws-crossplane/xfn/example/functions.yaml b/aws-crossplane/xfn/example/functions.yaml new file mode 100644 index 0000000..c17cf82 --- /dev/null +++ b/aws-crossplane/xfn/example/functions.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink + annotations: + # This tells crossplane beta render to connect to the function locally. + render.crossplane.io/runtime: Development +spec: + # This is ignored when using the Development runtime. 
+  package: function-managed-flink
diff --git a/aws-crossplane/xfn/example/xr.yaml b/aws-crossplane/xfn/example/xr.yaml
new file mode 100644
index 0000000..58e276d
--- /dev/null
+++ b/aws-crossplane/xfn/example/xr.yaml
@@ -0,0 +1,22 @@
+apiVersion: example.com/v1alpha1
+kind: XManagedFlink
+metadata:
+  name: flink-example-nkhnm
+spec:
+  claimRef:
+    apiVersion: example.com/v1alpha1
+    kind: ManagedFlink
+    name: flink-example
+    namespace: default
+  resourceConfig:
+    region: us-east-2
+    codeBucket: flink-example-bucket
+    codeFile: example-flink-app-1.0-SNAPSHOT.jar
+    environmentProperties:
+      - propertyGroup:
+          - propertyGroupId: MyApplicationProperties
+            propertyMap:
+              EVENTS_EGRESS_STREAM_DEFAULT: example-egress-kinesis-stream
+              EVENTS_INGRESS_STREAM_DEFAULT: example-ingress-kinesis-stream
+    name: flink-example
+    parallelism: 1
diff --git a/aws-crossplane/xfn/fn.go b/aws-crossplane/xfn/fn.go
new file mode 100644
index 0000000..f669c40
--- /dev/null
+++ b/aws-crossplane/xfn/fn.go
@@ -0,0 +1,221 @@
+package main
+
+import (
+    "context"
+
+    "github.com/crossplane/crossplane-runtime/pkg/errors"
+    "github.com/crossplane/crossplane-runtime/pkg/fieldpath"
+    "github.com/crossplane/crossplane-runtime/pkg/logging"
+    fnv1 "github.com/crossplane/function-sdk-go/proto/v1"
+    "github.com/crossplane/function-sdk-go/request"
+    "github.com/crossplane/function-sdk-go/resource"
+    "github.com/crossplane/function-sdk-go/response"
+    "gopkg.in/yaml.v3"
+)
+
+// Function composes an AWS Managed Flink application from the observed composite resource.
+type Function struct {
+    fnv1.UnimplementedFunctionRunnerServiceServer
+
+    log logging.Logger
+}
+
+// Helper function to create an array containing a map. In Unstructured, the arrays must be of type []interface{},
+// otherwise the Unstructured can't be converted to Struct.
+func arrayWithMap(value map[string]interface{}) []interface{} {
+    result := make([]interface{}, 1)
+    result[0] = value
+    return result
+}
+
+// Get a value from the composite at the given path, and if not found return the defaultValue
+func getValue(oxr *resource.Composite, path string, defaultValue any) (any, error) {
+    v, err := oxr.Resource.GetValue(path)
+    if err != nil {
+        if fieldpath.IsNotFound(err) {
+            return defaultValue, nil
+        }
+    }
+    return v, err
+}
+
+const FLINK_APP_RESOURCE_NAME resource.Name = "flink-application"
+
+// RunFunction runs the Function.
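+//
+// The flow below: read spec.resourceConfig from the observed composite (XR),
+// fall back to defaults for the optional fields via getValue, stamp AWS_REGION
+// into every propertyMap, and compose a single kinesisanalyticsv2.aws.upbound.io
+// Application managed resource. startApplication is only set once the observed
+// Application reports READY or RUNNING (see the workaround note further down).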
+func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) {
+    f.log.Info("Running function", "tag", req.GetMeta().GetTag())
+
+    rsp := response.To(req, response.DefaultTTL)
+
+    reqYaml, err := yaml.Marshal(req)
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "cannot marshal req to YAML %T", req))
+    }
+    f.log.Info("Request", "YAML", string(reqYaml))
+
+    desired, err := request.GetDesiredComposedResources(req)
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req))
+        return rsp, nil
+    }
+
+    oxr, err := request.GetObservedCompositeResource(req)
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "Cannot get observed XR from %T", req))
+        return rsp, nil
+    }
+
+    // Fetch required values from oxr.spec.resourceConfig. Capture the error from
+    // each lookup so that the check which follows actually guards that value.
+    region, err := oxr.Resource.GetValue("spec.resourceConfig.region")
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.region from %T", oxr))
+        return rsp, nil
+    }
+    codeBucket, err := oxr.Resource.GetValue("spec.resourceConfig.codeBucket")
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeBucket from %T", oxr))
+        return rsp, nil
+    }
+    codeFile, err := oxr.Resource.GetValue("spec.resourceConfig.codeFile")
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeFile from %T", oxr))
+        return rsp, nil
+    }
+    environmentProperties, err := oxr.Resource.GetValue("spec.resourceConfig.environmentProperties")
+    if err != nil {
+        response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.environmentProperties from %T", oxr))
+        return rsp, nil
+    }
+
+    // Fetch optional values from oxr.spec.resourceConfig
+    runtimeEnvironment, _ := getValue(oxr, "spec.resourceConfig.runtime", "FLINK-1_18")
+    snapshotsEnabled, _ := getValue(oxr, "spec.resourceConfig.snapshotsEnabled", true)
+    checkpointingEnabled, _ := getValue(oxr, "spec.resourceConfig.checkpointingEnabled", true)
+    checkpointIntervalMillis, _ := getValue(oxr, "spec.resourceConfig.checkpointIntervalMillis", 300000) // 5 minutes by default
+    logLevel, _ := getValue(oxr, "spec.resourceConfig.logLevel", "INFO")
+    metricsLevel, _ := getValue(oxr, "spec.resourceConfig.metricsLevel", "TASK")
+    autoScalingEnabled, _ := getValue(oxr, "spec.resourceConfig.autoScalingEnabled", false)
+    parallelism, _ := getValue(oxr, "spec.resourceConfig.parallelism", 1)
+    parallelismPerKpu, _ := getValue(oxr, "spec.resourceConfig.parallelismPerKpu", 1)
+    applicationRestoreType, _ := getValue(oxr, "spec.resourceConfig.applicationRestoreType", "RESTORE_FROM_LATEST_SNAPSHOT")
+    snapshotName, _ := getValue(oxr, "spec.resourceConfig.snapshotName", nil)
+    allowNonRestoredState, _ := getValue(oxr, "spec.resourceConfig.allowNonRestoredState", false)
+
+    flinkAppDesired := resource.NewDesiredComposed()
+    desired[FLINK_APP_RESOURCE_NAME] = flinkAppDesired
+
+    // Traverse environmentProperties and set AWS_REGION in all propertyMaps
+    epa := environmentProperties.([]interface{})
+    for _, v := range epa {
+        epm := v.(map[string]interface{})
+        pga := epm["propertyGroup"].([]interface{})
+        for _, p := range pga {
+            pgm := p.(map[string]interface{})
+            propertyMap := pgm["propertyMap"].(map[string]interface{})
+            propertyMap["AWS_REGION"] = region
+        }
+    }
+
+    flinkAppDesired.Resource.Object = map[string]interface{}{
+        "apiVersion": "kinesisanalyticsv2.aws.upbound.io/v1beta1",
+        "kind": "Application",
+        "metadata": map[string]interface{}{
+
"name": oxr.Resource.GetClaimReference().Name, + }, + "spec": map[string]interface{}{ + "deletionPolicy": "Delete", // "Orphan", + "forProvider": map[string]interface{}{ + "region": region, + "runtimeEnvironment": runtimeEnvironment, // "FLINK-1_18", + "applicationMode": "STREAMING", + "serviceExecutionRoleSelector": map[string]interface{}{ + "matchControllerRef": true, + }, + "applicationConfiguration": arrayWithMap(map[string]interface{}{ + "applicationCodeConfiguration": arrayWithMap(map[string]interface{}{ + "codeContentType": "ZIPFILE", + "codeContent": arrayWithMap(map[string]interface{}{ + "s3ContentLocation": arrayWithMap(map[string]interface{}{ + "fileKey": codeFile, + "bucketArnSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "crossplane.io/claim-name": codeBucket, + }, + }, + }), + }), + }), + "applicationSnapshotConfiguration": arrayWithMap(map[string]interface{}{ + "snapshotsEnabled": snapshotsEnabled, + }), + "environmentProperties": environmentProperties, + "flinkApplicationConfiguration": arrayWithMap(map[string]interface{}{ + "checkpointConfiguration": arrayWithMap(map[string]interface{}{ + "checkpointInterval": checkpointIntervalMillis, + "checkpointingEnabled": checkpointingEnabled, + "configurationType": "CUSTOM", + }), + "monitoringConfiguration": arrayWithMap(map[string]interface{}{ + "logLevel": logLevel, + "metricsLevel": metricsLevel, + "configurationType": "CUSTOM", + }), + "parallelismConfiguration": arrayWithMap(map[string]interface{}{ + "autoScalingEnabled": autoScalingEnabled, + "parallelism": parallelism, + "parallelismPerKpu": parallelismPerKpu, + "configurationType": "CUSTOM", + }), + }), + "runConfiguration": arrayWithMap(map[string]interface{}{ + "applicationRestoreConfiguration": arrayWithMap(map[string]interface{}{ + "applicationRestoreType": applicationRestoreType, + "snapshotName": snapshotName, + }), + "flinkRunConfiguration": arrayWithMap(map[string]interface{}{ + "allowNonRestoredState": allowNonRestoredState, + }), + }), + }), + "cloudwatchLoggingOptions": arrayWithMap(map[string]interface{}{ + "logStreamArnSelector": map[string]interface{}{ + "matchControllerRef": true, + }, + }), + }, + "providerConfigRef": map[string]interface{}{ + "name": "aws-provider", + }, + }, + } + + // Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. Don't set startApplication in the MR until the + // resource is READY, and continue to set it after it becomes RUNNING. + composed, _ := request.GetObservedComposedResources(req) + observedFlink, ok := composed[FLINK_APP_RESOURCE_NAME] + if ok { + v, err := observedFlink.Resource.GetValue("status.atProvider.status") + if err == nil && (v == "READY" || v == "RUNNING") { + flinkAppDesired.Resource.SetValue("spec.forProvider.startApplication", true) + } + } + + f.log.Info("response.Normal(rsp)") + + if err := response.SetDesiredComposedResources(rsp, desired); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composed resources in %T", rsp)) + return rsp, err + + } + response.Normal(rsp, "response.Normal(rsp)") + f.log.Info("response.Normal(rsp)") + + // You can set a custom status condition on the claim. This allows you to + // communicate with the user. See the link below for status condition + // guidance. + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + response.ConditionTrue(rsp, "FunctionSuccess", "Success"). 
+ TargetCompositeAndClaim() + + return rsp, nil +} diff --git a/aws-crossplane/xfn/fn_test.go b/aws-crossplane/xfn/fn_test.go new file mode 100644 index 0000000..2a573c1 --- /dev/null +++ b/aws-crossplane/xfn/fn_test.go @@ -0,0 +1,117 @@ +package main + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +// "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/crossplane/crossplane-runtime/pkg/logging" + fnv1 "github.com/crossplane/function-sdk-go/proto/v1" + "github.com/crossplane/function-sdk-go/resource" + "github.com/crossplane/function-sdk-go/response" +) + +func TestRunFunction(t *testing.T) { + + type args struct { + ctx context.Context + req *fnv1.RunFunctionRequest + } + type want struct { + rsp *fnv1.RunFunctionResponse + err error + } + + cases := map[string]struct { + reason string + args args + want want + }{ + "ResponseIsReturned": { + reason: "The Function should return a fatal result if no input was specified", + args: args{ + req: &fnv1.RunFunctionRequest{ + Meta: &fnv1.RequestMeta{Tag: "hello"}, + Input: resource.MustStructJSON(`{ + "apiVersion": "template.fn.crossplane.io/v1beta1", + "kind": "Input" + }`), + Observed: &fnv1.State{ + Composite: &fnv1.Resource{ + Resource: resource.MustStructJSON(`{ + "apiVersion": "example.com/v1alpha1", + "kind": "XManagedFlink", + "metadata": { + "name": "flink-demo", + "namespace": "default" + }, + "spec": { + "resourceConfig": { + "region": "us-east-2", + "name": "flink-test", + "codeBucket": "flink-test-bucket", + "codeFile": "flink-test-app.jar", + "environmentProperties": [{ + "propertyGroup": [{ + "propertyGroupId": "StatefunApplicationProperties", + "propertyMap": { + "EVENTS_INGRESS_STREAM_DEFAULT": "flink-test-ingress", + "EVENTS_EGRESS_STREAM_DEFAULT": "flink-demo-egress" + } + }] + }] + }, + "claimRef": { + "apiVersion": "example.com/v1alpha1", + "kind": "ManagedFlink", + "name": "flink-test", + "namespace": "default" + } + } + }`), + }, + }, + }, + }, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hello", Ttl: durationpb.New(response.DefaultTTL)}, + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Message: "response.Normal(rsp)", + Target: fnv1.Target_TARGET_COMPOSITE.Enum(), + }, + }, + Conditions: []*fnv1.Condition{ + { + Type: "FunctionSuccess", + Status: fnv1.Status_STATUS_CONDITION_TRUE, + Reason: "Success", + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + f := &Function{log: logging.NewNopLogger()} + _,/*rsp,*/ err := f.RunFunction(tc.args.ctx, tc.args.req) + + // if diff := cmp.Diff(tc.want.rsp, rsp, protocmp.Transform()); diff != "" { + // t.Errorf("%s\nf.RunFunction(...): -want rsp, +got rsp:\n%s", tc.reason, diff) + // } + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("%s\nf.RunFunction(...): -want err, +got err:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/aws-crossplane/xfn/go.mod b/aws-crossplane/xfn/go.mod new file mode 100644 index 0000000..618d4d6 --- /dev/null +++ b/aws-crossplane/xfn/go.mod @@ -0,0 +1,76 @@ +module github.com/crossplane/function-managed-flink + +go 1.23 + +toolchain go1.23.2 + +require ( + github.com/alecthomas/kong v0.9.0 + github.com/crossplane/crossplane-runtime v1.18.0 + github.com/crossplane/function-sdk-go v0.4.0 + github.com/google/go-cmp v0.6.0 + 
google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689 + k8s.io/apimachinery v0.31.0 + sigs.k8s.io/controller-tools v0.16.0 +) + +require ( + dario.cat/mergo v1.0.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fatih/color v1.17.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gobuffalo/flect v1.0.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.25.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/client-go v0.31.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect + sigs.k8s.io/controller-runtime v0.19.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/aws-crossplane/xfn/go.sum b/aws-crossplane/xfn/go.sum new file mode 100644 index 0000000..f2de160 --- /dev/null +++ b/aws-crossplane/xfn/go.sum @@ -0,0 +1,311 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= 
+github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= +github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= +github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/antchfx/htmlquery v1.2.4 h1:qLteofCMe/KGovBI6SQgmou2QNyedFUW+pE+BpeZ494= +github.com/antchfx/htmlquery v1.2.4/go.mod h1:2xO6iu3EVWs7R2JYqBbp8YzG50gj/ofqs5/0VZoDZLc= +github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= +github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossplane/crossplane-runtime v1.18.0 h1:aAQIMNOgPbbXaqj9CUSv+gPl3QnVbn33YlzSe145//0= +github.com/crossplane/crossplane-runtime v1.18.0/go.mod h1:p7nVVsLn0CWjsLvLCtr7T40ErbTgNWKRxmYnwFdfXb4= +github.com/crossplane/function-sdk-go v0.4.0 h1:1jd+UIaZlVNQCUO4hLAgUqWBRnUKw2ObF9ZuMw5CpKk= +github.com/crossplane/function-sdk-go v0.4.0/go.mod h1:jLnzUG8pt8tn/U6/uvtNStAhDjhIq4wCR31yECT54NM= +github.com/crossplane/upjet v1.4.1-0.20240911184956-3afbb7796d46 h1:2IH1YPTBrNmBj0Z1OCjEBTrQCuRaLutZbWLaswFeCFQ= +github.com/crossplane/upjet v1.4.1-0.20240911184956-3afbb7796d46/go.mod h1:wkdZf/Cvhr6PI30VdHIOjg4dX39Z5uijqnLWFk5PbGM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod 
h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 h1:xcuWappghOVI8iNWoF2OKahVejd1LSVi/v4JED44Amo= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= 
+github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637 h1:Ud/6/AdmJ1R7ibdS0Wo5MWPj0T1R0fkpaD087bBaW8I= +github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= +github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= 
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= +github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= +github.com/upbound/provider-aws v1.14.0 h1:DDUdlMp+dNlFXXlhsGdCvQD7qFdT1AsEcaqlRU3BO14= +github.com/upbound/provider-aws v1.14.0/go.mod h1:IvyvgGlhRVr737E4P75tyD/i53hxnyO7KPM8bbXH+SU= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= 
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= 
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689 h1:hNwajDgT0MlsxZzlUajZVmUYFpts8/CYe4BSNx503ZE= +google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= +k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-tools v0.16.0 h1:EJPB+a5Bve861SPBPPWRbP6bbKyNxqK12oYT5zEns9s= +sigs.k8s.io/controller-tools v0.16.0/go.mod h1:0I0xqjR65YTfoO12iR+mZR6s6UAVcUARgXRlsu0ljB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git 
a/aws-crossplane/xfn/init.sh b/aws-crossplane/xfn/init.sh
new file mode 100755
index 0000000..523fbb5
--- /dev/null
+++ b/aws-crossplane/xfn/init.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# This script helps initialize a new function project by
+# replacing all instances of function-template-go with the
+# name of your function. The script accepts two arguments:
+# 1. The name of your function
+# 2. The path to your function directory
+
+set -e
+
+cd "$2" || return
+
+# Replace function-template-go with the name of your function
+# in go.mod
+perl -pi -e s,function-template-go,"$1",g go.mod
+# in fn.go
+perl -pi -e s,function-template-go,"$1",g fn.go
+# in examples
+perl -pi -e s,function-template-go,"$1",g example/*
+
+echo "Function $1 has been initialised successfully"
diff --git a/aws-crossplane/xfn/input/generate.go b/aws-crossplane/xfn/input/generate.go
new file mode 100644
index 0000000..551821d
--- /dev/null
+++ b/aws-crossplane/xfn/input/generate.go
@@ -0,0 +1,15 @@
+//go:build generate
+// +build generate
+
+// NOTE(negz): See the below link for details on what is happening here.
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+
+// Remove existing and generate new input manifests
+//go:generate rm -rf ../package/input/
+//go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen paths=./v1beta1 object crd:crdVersions=v1 output:artifacts:config=../package/input
+
+package input
+
+import (
+    _ "sigs.k8s.io/controller-tools/cmd/controller-gen" //nolint:typecheck
+)
diff --git a/aws-crossplane/xfn/input/v1beta1/input.go b/aws-crossplane/xfn/input/v1beta1/input.go
new file mode 100644
index 0000000..c506bfc
--- /dev/null
+++ b/aws-crossplane/xfn/input/v1beta1/input.go
@@ -0,0 +1,24 @@
+// Package v1beta1 contains the input type for this Function
+// +kubebuilder:object:generate=true
+// +groupName=template.fn.crossplane.io
+// +versionName=v1beta1
+package v1beta1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// This isn't a custom resource, in the sense that we never install its CRD.
+// It is a KRM-like object, so we generate a CRD to describe its schema.
+
+// TODO: Add your input type here! It doesn't need to be called 'Input', you can
+// rename it to anything you like.
+
+// Input can be used to provide input to this Function.
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:categories=crossplane
+type Input struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+}
diff --git a/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go b/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..3b7e60f
--- /dev/null
+++ b/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,34 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+    runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Input) DeepCopyInto(out *Input) {
+    *out = *in
+    out.TypeMeta = in.TypeMeta
+    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input.
+func (in *Input) DeepCopy() *Input { + if in == nil { + return nil + } + out := new(Input) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Input) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/aws-crossplane/xfn/main.go b/aws-crossplane/xfn/main.go new file mode 100644 index 0000000..31a3acb --- /dev/null +++ b/aws-crossplane/xfn/main.go @@ -0,0 +1,38 @@ +// Package main implements a Composition Function. +package main + +import ( + "github.com/alecthomas/kong" + + "github.com/crossplane/function-sdk-go" +) + +// CLI of this Function. +type CLI struct { + Debug bool `short:"d" help:"Emit debug logs in addition to info logs."` + + Network string `help:"Network on which to listen for gRPC connections." default:"tcp"` + Address string `help:"Address at which to listen for gRPC connections." default:":9443"` + TLSCertsDir string `help:"Directory containing server certs (tls.key, tls.crt) and the CA used to verify client certificates (ca.crt)" env:"TLS_SERVER_CERTS_DIR"` + Insecure bool `help:"Run without mTLS credentials. If you supply this flag --tls-server-certs-dir will be ignored."` + MaxRecvMessageSize int `help:"Maximum size of received messages in MB." default:"4"` +} + +// Run this Function. +func (c *CLI) Run() error { + log, err := function.NewLogger(c.Debug) + if err != nil { + return err + } + + return function.Serve(&Function{log: log}, + function.Listen(c.Network, c.Address), + function.MTLSCertificates(c.TLSCertsDir), + function.Insecure(c.Insecure), + function.MaxRecvMessageSize(c.MaxRecvMessageSize*1024*1024)) +} + +func main() { + ctx := kong.Parse(&CLI{}, kong.Description("A Crossplane Composition Function.")) + ctx.FatalIfErrorf(ctx.Run()) +} diff --git a/aws-crossplane/xfn/package/crossplane.yaml b/aws-crossplane/xfn/package/crossplane.yaml new file mode 100644 index 0000000..efa9e69 --- /dev/null +++ b/aws-crossplane/xfn/package/crossplane.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: meta.pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: {} diff --git a/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml b/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml new file mode 100644 index 0000000..c7299ad --- /dev/null +++ b/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: inputs.template.fn.crossplane.io +spec: + group: template.fn.crossplane.io + names: + categories: + - crossplane + kind: Input + listKind: InputList + plural: inputs + singular: input + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: Input can be used to provide input to this Function. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true + storage: true diff --git a/aws-crossplane/xfn/renovate.json b/aws-crossplane/xfn/renovate.json new file mode 100644 index 0000000..21e99bb --- /dev/null +++ b/aws-crossplane/xfn/renovate.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "crossplane": { + "fileMatch": ["(^|/)example/.*\\.ya?ml$"] + }, + "packageRules": [ + { + "matchManagers": ["crossplane"], + "matchFileNames": ["example/**"], + "groupName": "examples" + } + ], + "postUpdateOptions": [ + "gomodTidy", + "gomodUpdateImportPaths" + ] +} From 80fad548ec739957c304fb6fa4530e3d9c1fafe2 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Tue, 14 Jan 2025 14:16:39 -1000 Subject: [PATCH 22/28] Add EnvironmentConfig w/ AWS account ID placeholder --- aws-crossplane/local/aws/manifests/credentials.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/aws-crossplane/local/aws/manifests/credentials.yaml b/aws-crossplane/local/aws/manifests/credentials.yaml index c867f9e..7456042 100644 --- a/aws-crossplane/local/aws/manifests/credentials.yaml +++ b/aws-crossplane/local/aws/manifests/credentials.yaml @@ -10,4 +10,11 @@ kind: Secret metadata: creationTimestamp: null name: aws-secret - namespace: crossplane-system \ No newline at end of file + namespace: crossplane-system +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig +metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" From 76dd68b444edd86570e32d0c0a2456d51097ddd2 Mon Sep 17 00:00:00 2001 From: Ken Ellinwood Date: Thu, 16 Jan 2025 12:27:56 -1000 Subject: [PATCH 23/28] More refactoring and cleanup --- README.md | 50 +-- .../claims/managed-flink-claim.yaml | 32 +- aws-crossplane/launch-and-config-idp.sh | 75 ++++ .../local/aws/manifests/functions.yaml | 23 + .../patch-and-transform-function.yaml | 7 - .../local/aws/update_credentials.sh | 16 +- .../local/localstack/configs/function.yaml | 8 - .../local/localstack/configs/functions.yaml | 23 + .../localstack/configs/local-secret.yaml | 9 +- .../configs/provider-config-localstack.yaml | 7 - .../local/localstack/configs/services.yaml | 16 - .../resources/flink/flink-comp.yaml | 265 +---------- .../resources/flink/flink-example-claim.yaml | 33 +- aws-crossplane/resources/flink/flink-xrd.yaml | 73 ++- aws-crossplane/xfn/Dockerfile | 69 ++- .../{local => xfn}/configure-xfn.sh | 5 + aws-crossplane/xfn/example/composition.yaml | 11 + .../xfn/example/extra-resources.yaml | 7 + aws-crossplane/xfn/example/functions.yaml | 7 + aws-crossplane/xfn/example/xr.yaml | 29 +- aws-crossplane/xfn/fn.go | 415 ++++++++++++++++-- aws-crossplane/xfn/fn_test.go | 17 +- 22 files changed, 762 insertions(+), 435 deletions(-) create mode 100755 aws-crossplane/launch-and-config-idp.sh create mode 100644 aws-crossplane/local/aws/manifests/functions.yaml delete mode 100644 aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml delete mode 100644 aws-crossplane/local/localstack/configs/function.yaml create mode 100644 aws-crossplane/local/localstack/configs/functions.yaml rename aws-crossplane/{local => xfn}/configure-xfn.sh (83%) mode change 100644 => 100755 create mode 100644 aws-crossplane/xfn/example/extra-resources.yaml diff --git a/README.md b/README.md index c9de940..4dc899d 
100644
--- a/README.md
+++ b/README.md
@@ -268,15 +268,11 @@ terraform destroy # When prompted, enter 'yes'
 - idpbuilder (https://github.com/cnoe-io/idpbuilder)
 - kubectl
 - jq
-- python3
 
 #### Introduction
 
-This demo of provisioning via Crossplane is nowhere near production quality. It merely demonstrates that it is possible
-to provision and run an AWS Managed Flink application via Crossplane. Many tasks normally performed via CI/CD must be
-completed manually as described below. The compositions currently use `function-patch-and-transform` instead of a custom
-composition function, and many things in the compositions remain hard-coded (AWS account number, region, ARNs in IAM
-roles, etc).
-
+This demo demonstrates that it is possible to provision and run an AWS Managed Flink application via Crossplane. Many
+tasks normally performed via CI/CD must be completed manually as described below. The compositions for S3 buckets and
+Kinesis streams currently use `function-patch-and-transform`, but the Managed Flink composition uses a custom function.
 
 #### Instructions
 
@@ -286,43 +282,29 @@ The files to run the crossplane demo are in the [aws-crossplane](./aws-crossplan
 cd aws-crossplane
 ```
 
-##### Start the local IDP configured to use AWS
+##### Update the AWS credentials in the local environment
 
 Log in to AWS Identity Center, and copy the AWS credential environment variable commands from the Access Keys page.
-Paste and execute the AWS environment variable commands, then run this script:
-
-```
-./local/aws/update_credentials.sh
-```
-
-Launch the local IDP using idpbuilder (https://github.com/cnoe-io/idpbuilder)
+Paste and execute the AWS environment variable commands.
 
-```
-idpbuilder create -p ./local/aws
-```
+Set the AWS_ACCOUNT environment variable to your AWS account number, or run `aws configure sso` / `aws sso login`.
+Setting the account number explicitly is optional if it can be determined instead via `aws sts get-caller-identity`.
 
-The `idpbuilder create` command takes a few minutes to complete, and even then it will take more time for crossplane to
-start and the providers to be loaded.
-
-Wait for the AWS providers to finish loading...
-
-```
-kubectl -n crossplane-system get pods | grep provider-aws
+Finally, run the following script to update the AWS credentials for the local environment:
+```shell
+./local/aws/update_credentials.sh
 ```
 
-Wait until the command above returns a list of pods all in the `Running` state.
+##### Launch and configure a Kubernetes cluster using the "idpbuilder" tool
 
-##### Install the Crossplane resources (XRDs and Compositions)
-Install the Composite Resource Definitions and Compositions required by the demo. Ignore the warnings issued by the
-following command:
+Run `./launch-and-config-idp.sh`
 
-```
-for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do k apply -f $i; done
-```
+This script will launch a local Kubernetes cluster using `kind`, and configure the cluster with the necessary
+Crossplane providers and resources. It also builds and uploads the docker image for the Managed Flink composition
+function.
 
-At the time of this writing the demo does not utilize a custom composition function. Instead, it uses the off-the-shelf
-function `function-patch-and-transform` which gets loaded during IDP creation, above.
+The script takes a few minutes to complete.
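+To verify that the cluster converged before applying claims, a couple of manual spot
+checks like the following can help (a sketch; the names assume the manifests under
+`local/aws` are unmodified):
+
+```shell
+# All AWS provider pods should settle into the Running state
+kubectl -n crossplane-system get pods | grep provider-aws
+
+# Installed composition functions, including function-managed-flink, should report HEALTHY=True
+kubectl get functions.pkg.crossplane.io
+```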
##### Provision AWS Managed Flink via Crossplane claims

diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml
index dd43cc0..80e61e7 100644
--- a/aws-crossplane/claims/managed-flink-claim.yaml
+++ b/aws-crossplane/claims/managed-flink-claim.yaml
@@ -5,17 +5,37 @@ metadata:
   namespace: default
 spec:
   resourceConfig:
-    region: us-east-2
-    name: flink-cp-demo
+    startApplication: true
     codeBucket: flink-cp-demo-bucket
     codeFile: my-stateful-functions-embedded-java-3.3.0.jar
-    runtime: FLINK-1_18
-    startApplication: true
-    parallelism: 1
+    additionalPermissions:
+      managedPolicyArns:
+        - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess"
+      inlinePolicies:
+        - name: kinesis_policy
+          policy: |
+            {
+              "Version": "2012-10-17",
+              "Statement": [
+                {
+                  "Effect": "Allow",
+                  "Resource": [
+                    "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress",
+                    "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress"
+                  ],
+                  "Action": [
+                    "kinesis:DescribeStream",
+                    "kinesis:GetRecords",
+                    "kinesis:GetShardIterator",
+                    "kinesis:ListShards",
+                    "kinesis:PutRecord"
+                  ]
+                }
+              ]
+            }
     environmentProperties:
       - propertyGroup:
           - propertyGroupId: StatefunApplicationProperties
             propertyMap:
               EVENTS_INGRESS_STREAM_DEFAULT: flink-cp-demo-ingress
               EVENTS_EGRESS_STREAM_DEFAULT: flink-cp-demo-egress
-              AWS_REGION: us-east-2
diff --git a/aws-crossplane/launch-and-config-idp.sh b/aws-crossplane/launch-and-config-idp.sh
new file mode 100755
index 0000000..5e2acea
--- /dev/null
+++ b/aws-crossplane/launch-and-config-idp.sh
@@ -0,0 +1,75 @@
+#! /bin/bash
+
+# A helper script to launch and configure a local IDP
+cd $(dirname $0)
+
+function main() {
+
+    # Default to AWS, but allow localstack as well
+    cloud=${1:-aws}
+    if [ $cloud = "aws" ]; then
+        # Verify that the credentials are set and not expired
+        if grep =REPLACE local/aws/manifests/credentials.yaml >/dev/null || \
+           grep 000000000000 local/aws/manifests/credentials.yaml >/dev/null || \
+           [[ $(echo $(date +%s) - $(stat -f%m local/aws/manifests/credentials.yaml) | bc) -gt 43200 ]]; then
+            echo "The credentials in ./local/aws/manifests/credentials.yaml appear to have expired. Consider running ./local/aws/update_credentials.sh"
+
+            # I haven't figured out how to refresh the credentials w/o restarting the cluster, so delete the cluster
+            if [ "$(idpbuilder get clusters)" ]; then
+                idpbuilder delete
+            fi
+            exit 1
+        fi
+    fi
+
+    if [ -z $(idpbuilder get clusters) ]; then
+        echo "Running: idpbuilder create -p local/$cloud"
+        idpbuilder create -p local/$cloud
+        echo
+    fi
+
+    echo "Waiting for gitea to be ready..."
+    wait_for_pods gitea my-gitea
+    echo
+
+    echo "Building and loading the docker image for the Managed Flink composition function"
+    ./xfn/configure-xfn.sh
+    echo
+
+    echo "Waiting for the Crossplane AWS providers to be ready..."
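+    # (Added note, a sketch of the behavior): wait_for_pods, defined at the bottom
+    # of this script, polls `kubectl get pods` in the given namespace until every
+    # pod whose name matches the given prefix reports Running.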
+ wait_for_pods crossplane-system provider-aws + echo + + echo "Loading the Crossplane Composite Resource Definitions and Compositions" + for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do + kubectl apply -f $i + done + echo + + echo "The system is ready for claims to be applied" +} + +# Wait for pods in the given namespace to be running +function wait_for_pods() { + namespace=$1 + pod_name_prefix=$2 + + running=0 + total=0 + + until [[ $total != 0 && $total == $running ]]; do + + sleep 2 + running=0 + total=0 + + for i in $(kubectl -n ${namespace} get pods | grep $pod_name_prefix | grep -v Completed | awk '{print $3}'); do + if [ $i == "Running" ]; then + running=$(echo $running + 1 | bc) + fi + total=$(echo $total + 1 | bc) + done + done +} + +main "$@" diff --git a/aws-crossplane/local/aws/manifests/functions.yaml b/aws-crossplane/local/aws/manifests/functions.yaml new file mode 100644 index 0000000..837df6c --- /dev/null +++ b/aws-crossplane/local/aws/manifests/functions.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: + package: gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + ignoreCrossplaneConstraints: true + skipDependencyResolution: true diff --git a/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml b/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml deleted file mode 100644 index 15c2626..0000000 --- a/aws-crossplane/local/aws/manifests/patch-and-transform-function.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: pkg.crossplane.io/v1 -kind: Function -metadata: - name: function-patch-and-transform -spec: - package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 diff --git a/aws-crossplane/local/aws/update_credentials.sh b/aws-crossplane/local/aws/update_credentials.sh index 365103e..9f366d6 100755 --- a/aws-crossplane/local/aws/update_credentials.sh +++ b/aws-crossplane/local/aws/update_credentials.sh @@ -1,20 +1,30 @@ #! /bin/bash -# Update manifests/credentials.yaml with values from environment variables +# Transfer account number and key/secret/session tokens to the local Crossplane AWS provider cd $(dirname $0) +if [ -z "$AWS_ACCOUNT" ]; then + # Attempt to resolve the account number via get-caller-identity + AWS_ACCOUNT=$(aws sts get-caller-identity --query Account --output text) + if [ -z "$AWS_ACCOUNT" ]; then + echo "AWS_ACCOUNT is not set, and finding it via \`aws sts get-caller-identity\` failed." + exit 1 + fi +fi + required_vars="AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN" for var in ${required_vars}; do val=$(eval "echo \$$var") if [ -z "$val" ]; then - echo "$var is not set" - exit 1 + echo "$var is not set" + exit 1 fi done git restore manifests/credentials.yaml cat manifests/credentials.yaml | \ + sed "s!000000000000!$AWS_ACCOUNT!" | \ sed "s!aws_access_key_id=REPLACE!aws_access_key_id=$AWS_ACCESS_KEY_ID!" | \ sed "s!aws_secret_access_key=REPLACE!aws_secret_access_key=$AWS_SECRET_ACCESS_KEY!" | \ sed "s!aws_session_token=REPLACE!aws_session_token=$AWS_SESSION_TOKEN!" 
>manifests/credentials.yaml.tmp diff --git a/aws-crossplane/local/localstack/configs/function.yaml b/aws-crossplane/local/localstack/configs/function.yaml deleted file mode 100644 index 9fac525..0000000 --- a/aws-crossplane/local/localstack/configs/function.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: pkg.crossplane.io/v1 -kind: Function -metadata: - name: function-patch-and-transform - namespace: crossplane-system -spec: - package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 diff --git a/aws-crossplane/local/localstack/configs/functions.yaml b/aws-crossplane/local/localstack/configs/functions.yaml new file mode 100644 index 0000000..837df6c --- /dev/null +++ b/aws-crossplane/local/localstack/configs/functions.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: + package: gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + ignoreCrossplaneConstraints: true + skipDependencyResolution: true diff --git a/aws-crossplane/local/localstack/configs/local-secret.yaml b/aws-crossplane/local/localstack/configs/local-secret.yaml index 3044636..92809e8 100644 --- a/aws-crossplane/local/localstack/configs/local-secret.yaml +++ b/aws-crossplane/local/localstack/configs/local-secret.yaml @@ -8,4 +8,11 @@ stringData: [default] aws_access_key_id = replaceme aws_secret_access_key = replaceme - aws_session_token = replaceme \ No newline at end of file + aws_session_token = replaceme +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig +metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" diff --git a/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml index 7275662..4b6369a 100644 --- a/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml +++ b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml @@ -14,18 +14,11 @@ spec: key: creds endpoint: services: - - dynamodb - iam - - lambda - s3 - - sqs - - sns - kinesis - - firehose - cloudwatch - logs - - secretsmanager - - eventbridge - kinesisanalyticsv2 hostnameImmutable: true url: diff --git a/aws-crossplane/local/localstack/configs/services.yaml b/aws-crossplane/local/localstack/configs/services.yaml index a41e3c7..4f89245 100644 --- a/aws-crossplane/local/localstack/configs/services.yaml +++ b/aws-crossplane/local/localstack/configs/services.yaml @@ -35,19 +35,3 @@ metadata: namespace: crossplane-system spec: package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0 ---- -apiVersion: pkg.crossplane.io/v1 -kind: Provider -metadata: - name: provider-aws-cloudwatchevents - namespace: crossplane-system -spec: - package: xpkg.upbound.io/upbound/provider-aws-cloudwatchevents:v1.17.0 ---- -apiVersion: pkg.crossplane.io/v1 -kind: Provider -metadata: - name: provider-aws-lambda - namespace: crossplane-system -spec: - package: xpkg.upbound.io/upbound/provider-aws-lambda:v1.17.0 diff --git a/aws-crossplane/resources/flink/flink-comp.yaml 
b/aws-crossplane/resources/flink/flink-comp.yaml index 54d51d4..9880321 100644 --- a/aws-crossplane/resources/flink/flink-comp.yaml +++ b/aws-crossplane/resources/flink/flink-comp.yaml @@ -2,263 +2,24 @@ apiVersion: apiextensions.crossplane.io/v1 kind: Composition metadata: - name: flinkbasic.example.com - labels: - appReadyHandler: none + name: managedflink.example.com spec: compositeTypeRef: apiVersion: example.com/v1alpha1 kind: XManagedFlink mode: Pipeline pipeline: - - step: patch-and-transform + - step: environment-configs functionRef: - name: function-patch-and-transform + name: function-environment-configs input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: managed-flink-application - base: - apiVersion: kinesisanalyticsv2.aws.upbound.io/v1beta1 - kind: Application - metadata: - annotations: - meta.upbound.io/example-id: kinesisanalyticsv2/v1beta1/application - name: example - spec: - forProvider: - applicationConfiguration: - - applicationCodeConfiguration: - - codeContentType: ZIPFILE - codeContent: - - s3ContentLocation: - - fileKey: example-flink-application.jar - bucketArnSelector: - matchLabels: - crossplane.io/claim-name: example-bucket - applicationSnapshotConfiguration: - - snapshotsEnabled: true - environmentProperties: - - propertyGroup: - - propertyGroupId: MyAppProperties - propertyMap: - FOO: bar - AWS_REGION: us-west-1 - flinkApplicationConfiguration: - - checkpointConfiguration: - - checkpointInterval: 60000 # every minute # Update to 5 mins for production - checkpointingEnabled: true - configurationType: CUSTOM - monitoringConfiguration: - - logLevel: INFO - metricsLevel: TASK - configurationType: CUSTOM - parallelismConfiguration: - - autoScalingEnabled: false - parallelism: 2 - parallelismPerKpu: 1 - configurationType: CUSTOM - runConfiguration: - - applicationRestoreConfiguration: - - applicationRestoreType: RESTORE_FROM_LATEST_SNAPSHOT # RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT - # snapshotName: xyz # Specify this when restoreType = RESTORE_FROM_CUSTOM_SNAPSHOT - flinkRunConfiguration: - - allowNonRestoredState: false - applicationMode: STREAMING - cloudwatchLoggingOptions: - - logStreamArnSelector: - matchControllerRef: true - region: us-east-2 - runtimeEnvironment: FLINK-1_18 - serviceExecutionRoleSelector: - matchLabels: - rolePurpose: flink-application - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-application" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.codeFile - toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].fileKey - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.codeBucket - toFieldPath: spec.forProvider.applicationConfiguration[0].applicationCodeConfiguration[0].codeContent[0].s3ContentLocation[0].bucketArnSelector.matchLabels['crossplane.io/claim-name'] - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.runtime - toFieldPath: spec.forProvider.runtimeEnvironment - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.startApplication - toFieldPath: spec.forProvider.startApplication - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.parallelism - toFieldPath: 
spec.forProvider.applicationConfiguration[0].flinkApplicationConfiguration[0].parallelismConfiguration[0].parallelism - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.environmentProperties - toFieldPath: spec.forProvider.applicationConfiguration[0].environmentProperties - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.region - toFieldPath: spec.forProvider.region - - type: ToCompositeFieldPath - fromFieldPath: status.atProvider.arn - toFieldPath: status.managedFlinkArn - - type: ToCompositeFieldPath - fromFieldPath: status.atProvider.id - toFieldPath: status.managedFlinkName - - name: managed-flink-role - base: - apiVersion: iam.aws.upbound.io/v1beta1 - kind: Role - metadata: - annotations: - meta.upbound.io/example-id: iam/v1beta1/role - labels: - rolePurpose: flink-application - name: example - spec: - forProvider: - assumeRolePolicy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "kinesisanalytics.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - } - managedPolicyArns: - - arn:aws:iam::aws:policy/AmazonKinesisFullAccess - - arn:aws:iam::aws:policy/AmazonS3FullAccess - - arn:aws:iam::aws:policy/CloudWatchFullAccess - inlinePolicy: - - name: kinesis_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": [ - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress", - "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress" - ], - "Action": [ - "kinesis:DescribeStream", - "kinesis:GetRecords", - "kinesis:GetShardIterator", - "kinesis:ListShards" - ] - } - ] - } - - name: logs_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": [ - "arn:aws:logs:us-east-2:516535517513:log-group:flink-cp-demo-log-group" - ], - "Action": [ - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "logs:PutLogEvents" - ] - } - ] - } - - name: metrics_policy - policy: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": "*", - "Action": [ - "cloudwatch:PutMetricData" - ] - } - ] - } - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-app-role" - - name: log-group - base: - apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 - kind: Group - metadata: - annotations: - meta.upbound.io/example-id: cloudwatchlogs/v1beta1/group - name: example - spec: - forProvider: - region: us-east-2 - retentionInDays: 7 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-log-group" - - name: log-stream - base: - apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 - kind: Stream - metadata: - annotations: - meta.upbound.io/example-id: cloudwatchlogs/v1beta1/stream - name: example - spec: - forProvider: - logGroupNameSelector: - matchControllerRef: true - name: example - region: us-east-2 - providerConfigRef: - name: provider-aws - patches: - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: metadata.name - transforms: - - type: string - string: - type: Format - fmt: "%s-log-stream" - - type: FromCompositeFieldPath - fromFieldPath: spec.resourceConfig.name - toFieldPath: spec.forProvider.name 
- transforms: - - type: string - string: - type: Format - fmt: "%s-log-stream" - \ No newline at end of file + apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 + kind: Input + spec: + environmentConfigs: + - type: Reference + ref: + name: aws-env-config + - step: render + functionRef: + name: function-managed-flink diff --git a/aws-crossplane/resources/flink/flink-example-claim.yaml b/aws-crossplane/resources/flink/flink-example-claim.yaml index 6f7c7ea..2dc4327 100644 --- a/aws-crossplane/resources/flink/flink-example-claim.yaml +++ b/aws-crossplane/resources/flink/flink-example-claim.yaml @@ -5,17 +5,38 @@ metadata: namespace: default spec: resourceConfig: - region: us-east-2 - name: flink-demo codeBucket: flink-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar - runtime: FLINK-1_18 - parallelism: 1 - # startApplication: true + # The composition will generate a role with basic permissions for the application (e.g. logging, metrics), but + # it doesn't know what additional permissions the application needs. You can specify them here... + additionalPermissions: + managedPolicyArns: + - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + inlinePolicies: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:PutRecord" + ] + } + ] + } environmentProperties: - propertyGroup: - propertyGroupId: StatefunApplicationProperties propertyMap: EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress - AWS_REGION: us-east-2 diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml index 34bbaa1..de6328f 100644 --- a/aws-crossplane/resources/flink/flink-xrd.yaml +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -16,25 +16,68 @@ spec: referenceable: true schema: openAPIV3Schema: + type: object properties: spec: + type: object properties: resourceConfig: + type: object properties: region: type: string - name: - type: string codeBucket: type: string codeFile: type: string + delayedStart: + type: boolean + delayStartBySeconds: + type: integer + startApplication: + type: boolean runtime: type: string + snapshotsEnabled: + type: string + checkpointingEnabled: + type: string + checkpointIntervalMillis: + type: number + logLevel: + type: string + metricsLevel: + type: string + autoScalingEnabled: + type: boolean parallelism: type: number - startApplication: + parallelismPerKpu: + type: number + applicationRestoreType: + type: string + snapshotName: + type: string + allowNonRestoredState: type: boolean + additionalPermissions: + type: object + properties: + managedPolicyArns: + type: array + items: + type: string + inlinePolicies: + type: array + items: + type: object + properties: + name: + type: string + policy: + type: string + items: + type: string environmentProperties: type: array items: @@ -51,13 +94,21 @@ spec: type: object additionalProperties: type: string - type: object - type: object + required: + - codeBucket + - codeFile + - environmentProperties + required: + - resourceConfig status: - properties: - managedFlinkName: - type: string - managedFlinkArn: - type: string type: object - type: object \ No newline at end of file + properties: + 
wa1419:
+            description: >
+              The object at 'status.wa1419' contains properties maintained by the composition function related
+              to the workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419
+            type: object
+            properties:
+              readyAt:
+                type: integer
+                format: int64
diff --git a/aws-crossplane/xfn/Dockerfile b/aws-crossplane/xfn/Dockerfile
index 36b633a..6ce07b1 100644
--- a/aws-crossplane/xfn/Dockerfile
+++ b/aws-crossplane/xfn/Dockerfile
@@ -1,48 +1,35 @@
-# syntax=docker/dockerfile:1
+FROM golang:1.23 AS build-stage
 
-# We use the latest Go 1.x version unless asked to use something else.
-# The GitHub Actions CI job sets this argument for a consistent Go version.
-ARG GO_VERSION=1
+WORKDIR /fn
 
-# Setup the base environment. The BUILDPLATFORM is set automatically by Docker.
-# The --platform=${BUILDPLATFORM} flag tells Docker to build the function using
-# the OS and architecture of the host running the build, not the OS and
-# architecture that we're building the function for.
-FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS build
+COPY . .
+RUN go mod download
 
-WORKDIR /fn
-# Most functions don't want or need CGo support, so we disable it.
-# If CGo support is needed make sure to also change the base image to one that
-# includes glibc, like 'distroless/base'.
-ENV CGO_ENABLED=0
-
-# We run go mod download in a separate step so that we can cache its results.
-# This lets us avoid re-downloading modules if we don't need to. The type=target
-# mount tells Docker to mount the current directory read-only in the WORKDIR.
-# The type=cache mount tells Docker to cache the Go modules cache across builds.
-RUN --mount=target=. --mount=type=cache,target=/go/pkg/mod go mod download
-
-# The TARGETOS and TARGETARCH args are set by docker. We set GOOS and GOARCH to
-# these values to ask Go to compile a binary for these architectures. If
-# TARGETOS and TARGETOS are different from BUILDPLATFORM, Go will cross compile
-# for us (e.g. compile a linux/amd64 binary on a linux/arm64 build machine).
-ARG TARGETOS
-ARG TARGETARCH
-
-# Build the function binary. The type=target mount tells Docker to mount the
-# current directory read-only in the WORKDIR. The type=cache mount tells Docker
-# to cache the Go modules cache across builds.
-RUN --mount=target=. \
-    --mount=type=cache,target=/go/pkg/mod \
-    --mount=type=cache,target=/root/.cache/go-build \
-    GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /function .
-
-# Produce the Function image. We use a very lightweight 'distroless' image that
-# does not include any of the build tools used in previous stages.
-FROM gcr.io/distroless/static-debian12:nonroot AS image
+RUN CGO_ENABLED=0 go build -o /function .
+
+FROM debian:12.1-slim AS package-stage
+
+# TODO(negz): Use a proper Crossplane package building tool. We're abusing the
+# fact that this image won't have an io.crossplane.pkg: base annotation. This
+# means Crossplane package manager will pull this entire ~100MB image, which
+# also happens to contain a valid Function runtime.
+# https://github.com/crossplane/crossplane/blob/v1.13.2/contributing/specifications/xpkg.md +WORKDIR /package +COPY package/ ./ + +RUN cat crossplane.yaml > /package.yaml +RUN cat input/*.yaml >> /package.yaml + +FROM gcr.io/distroless/base-debian11 AS build-release-stage + WORKDIR / -COPY --from=build /function /function + +COPY --from=build-stage /function /function +COPY --from=package-stage /package.yaml /package.yaml + EXPOSE 9443 + USER nonroot:nonroot -ENTRYPOINT ["/function"] + +ENTRYPOINT ["/function", "--debug"] diff --git a/aws-crossplane/local/configure-xfn.sh b/aws-crossplane/xfn/configure-xfn.sh old mode 100644 new mode 100755 similarity index 83% rename from aws-crossplane/local/configure-xfn.sh rename to aws-crossplane/xfn/configure-xfn.sh index deba414..142866e --- a/aws-crossplane/local/configure-xfn.sh +++ b/aws-crossplane/xfn/configure-xfn.sh @@ -1,5 +1,10 @@ #!/bin/bash +# Run this script to deploy the function to the local running IDP +cd $(dirname $0) + +go generate ./... + # Build the Docker image docker build -t function-managed-flink . diff --git a/aws-crossplane/xfn/example/composition.yaml b/aws-crossplane/xfn/example/composition.yaml index ff36c74..da038b0 100644 --- a/aws-crossplane/xfn/example/composition.yaml +++ b/aws-crossplane/xfn/example/composition.yaml @@ -8,6 +8,17 @@ spec: kind: XR mode: Pipeline pipeline: + - step: environment-configs + functionRef: + name: function-environment-configs + input: + apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 + kind: Input + spec: + environmentConfigs: + - type: Reference + ref: + name: aws-env-config - step: run-the-function functionRef: name: function-managed-flink diff --git a/aws-crossplane/xfn/example/extra-resources.yaml b/aws-crossplane/xfn/example/extra-resources.yaml new file mode 100644 index 0000000..41428ee --- /dev/null +++ b/aws-crossplane/xfn/example/extra-resources.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig +metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" diff --git a/aws-crossplane/xfn/example/functions.yaml b/aws-crossplane/xfn/example/functions.yaml index c17cf82..69ee0e2 100644 --- a/aws-crossplane/xfn/example/functions.yaml +++ b/aws-crossplane/xfn/example/functions.yaml @@ -1,4 +1,11 @@ --- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- apiVersion: pkg.crossplane.io/v1beta1 kind: Function metadata: diff --git a/aws-crossplane/xfn/example/xr.yaml b/aws-crossplane/xfn/example/xr.yaml index 58e276d..09f0579 100644 --- a/aws-crossplane/xfn/example/xr.yaml +++ b/aws-crossplane/xfn/example/xr.yaml @@ -10,13 +10,38 @@ spec: namespace: default resourceConfig: region: us-east-2 + account: "000000000000" codeBucket: flink-example-bucket codeFile: example-flink-app-1.0-SNAPSHOT.jar + additionalPermissions: + managedPolicyArns: + - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + inlinePolicies: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:PutRecord" + ] + } + ] + } environmentProperties: - propertyGroup: - propertyGroupId: 
MyApplicationProperties
           propertyMap:
             EVENTS_EGRESS_STREAM_DEFAULT: example-egress-kinesis-stream
-            EVENTS_INGRESS_STREAM_DEFAULT: example-ingress-kinesis-stream
-  name: flink-example
+            EVENTS_INGRESS_STREAM_DEFAULT: example-ingress-kinesis-stream
   parallelism: 1
diff --git a/aws-crossplane/xfn/fn.go b/aws-crossplane/xfn/fn.go
index f669c40..cdb8aa5 100644
--- a/aws-crossplane/xfn/fn.go
+++ b/aws-crossplane/xfn/fn.go
@@ -2,15 +2,19 @@ package main
 
 import (
 	"context"
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
 	"github.com/crossplane/crossplane-runtime/pkg/errors"
 	"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
 	"github.com/crossplane/crossplane-runtime/pkg/logging"
+	fncontext "github.com/crossplane/function-sdk-go/context"
 	fnv1 "github.com/crossplane/function-sdk-go/proto/v1"
 	"github.com/crossplane/function-sdk-go/request"
 	"github.com/crossplane/function-sdk-go/resource"
 	"github.com/crossplane/function-sdk-go/response"
-	"gopkg.in/yaml.v3"
 )
 
 // Function returns whatever response you ask it to.
@@ -20,6 +24,16 @@ type Function struct {
 	log logging.Logger
 }
 
+func getEnvironmentConfig(req *fnv1.RunFunctionRequest) (*unstructured.Unstructured, error) {
+	env := &unstructured.Unstructured{}
+	if v, ok := request.GetContextKey(req, fncontext.KeyEnvironment); ok {
+		if err := resource.AsObject(v.GetStructValue(), env); err != nil {
+			return env, fmt.Errorf("cannot get Composition environment from %T context key %q", req, fncontext.KeyEnvironment)
+		}
+	}
+	return env, nil
+}
+
 // Helper function to create an array containing a map. In Unstructured, the arrays must be of type []interface{},
 // otherwise the Unstructured can't be converted to Struct.
 func arrayWithMap(value map[string]interface{}) []interface{} {
@@ -28,6 +42,14 @@ func arrayWithMap(value map[string]interface{}) []interface{} {
 	return result
 }
 
+func arrayWithMaps(values []map[string]interface{}) []interface{} {
+	result := make([]interface{}, len(values))
+	for i, v := range values {
+		result[i] = v
+	}
+	return result
+}
+
 // Get a value from the composite at the given path, and if not found return the defaultValue
 func getValue(oxr *resource.Composite, path string, defaultValue any) (any, error) {
 	v, err := oxr.Resource.GetValue(path)
@@ -39,55 +61,105 @@ func getValue(oxr *resource.Composite, path string, defaultValue any) (any, erro
+func getArrayValue(oxr *resource.Composite, path string, defaultValue []interface{}) ([]interface{}, error) {
+	v, err := oxr.Resource.GetValue(path)
+	if err != nil {
+		if fieldpath.IsNotFound(err) {
+			return defaultValue, nil
+		}
+	}
+	array, ok := v.([]interface{})
+	if ok {
+		return array, nil
+	}
+	return nil, errors.Errorf("Value at %s is not an array", path)
+}
+
 const FLINK_APP_RESOURCE_NAME resource.Name = "flink-application"
+const LOG_GROUP_RESOURCE_NAME resource.Name = "flink-log-group"
+const LOG_STREAM_RESOURCE_NAME resource.Name = "flink-log-stream"
+const ROLE_RESOURCE_NAME resource.Name = "flink-role"
 
-// RunFunction runs the Function.
-func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { - f.log.Info("Running function", "tag", req.GetMeta().GetTag()) +func RenderManagedFlinkResources(req *fnv1.RunFunctionRequest, rsp *fnv1.RunFunctionResponse, oxr *resource.Composite, log logging.Logger) (*fnv1.RunFunctionRequest, *fnv1.RunFunctionResponse) { - rsp := response.To(req, response.DefaultTTL) + desired, err := request.GetDesiredComposedResources(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req)) + return req, rsp + } - reqYaml, err := yaml.Marshal(req) + observed, err := request.GetObservedComposedResources(req) if err != nil { - response.Fatal(rsp, errors.Wrapf(err, "cannot marshal req to YAML %T", req)) + response.Fatal(rsp, errors.Wrapf(err, "cannot get observed composed resources in %T", req)) + return req, rsp } - f.log.Info("Request", "YAML", string(reqYaml)) - desired, err := request.GetDesiredComposedResources(req) + envConfig, err := getEnvironmentConfig(req) if err != nil { - response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req)) - return rsp, nil + response.Fatal(rsp, errors.Wrapf(err, "cannot get env config in %T", rsp)) + return req, rsp } - oxr, err := request.GetObservedCompositeResource(req) + err = GenerateManagedFlink(rsp, desired, observed, oxr, log) if err != nil { - response.Fatal(rsp, errors.Wrapf(err, "Cannot get observed XR from %T", req)) - return rsp, nil + response.Fatal(rsp, errors.Wrap(err, "failed to render ManagedFlink resource")) + return req, rsp } - // Fetch required values from oxr.spec.resourceConfig - region, _ := oxr.Resource.GetValue("spec.resourceConfig.region") + err = GenerateRole(rsp, envConfig, desired, observed, oxr, log) if err != nil { - response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.region from %T", oxr)) - return rsp, nil + response.Fatal(rsp, errors.Wrap(err, "failed to render Role resource")) + return req, rsp + } + + err = GenerateLogGroup(rsp, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render LogGroup resource")) + return req, rsp } - codeBucket, _ := oxr.Resource.GetValue("spec.resourceConfig.codeBucket") + + err = GenerateLogStream(rsp, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render LogStream resource")) + return req, rsp + } + if err := response.SetDesiredComposedResources(rsp, desired); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composed resources in %T", rsp)) + return req, rsp + } + + oxr.Resource.SetValue("metadata.managedFields", nil) + + if err := response.SetDesiredCompositeResource(rsp, oxr); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composite resource in %T", rsp)) + return req, rsp + } + + return req, rsp +} + +func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error { + // Fetch required values from oxr.spec.resourceConfig + codeBucket, err := oxr.Resource.GetValue("spec.resourceConfig.codeBucket") if err != nil { response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeBucket from %T", oxr)) - return rsp, nil + return err } - codeFile, _ := oxr.Resource.GetValue("spec.resourceConfig.codeFile") + codeFile, err := 
oxr.Resource.GetValue("spec.resourceConfig.codeFile") if err != nil { response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeFile from %T", oxr)) - return rsp, nil + return err } - environmentProperties, _ := oxr.Resource.GetValue("spec.resourceConfig.environmentProperties") + environmentProperties, err := oxr.Resource.GetValue("spec.resourceConfig.environmentProperties") if err != nil { response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.environmentProperties from %T", oxr)) - return rsp, nil + return err } // Fetch optional values from oxr.spec.resourceConfig + region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") + delayedStart, _ := getValue(oxr, "spec.resourceConfig.delayedStart", false) + startApplication, _ := getValue(oxr, "spec.resourceConfig.startApplication", false) runtimeEnvironment, _ := getValue(oxr, "spec.resourceConfig.runtime", "FLINK-1_18") snapshotsEnabled, _ := getValue(oxr, "spec.resourceConfig.snapshotsEnabled", true) checkpointingEnabled, _ := getValue(oxr, "spec.resourceConfig.checkpointingEnabled", true) @@ -115,6 +187,12 @@ func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) propertyMap["AWS_REGION"] = region } } + appRestoreConfig := map[string]interface{}{ + "applicationRestoreType": applicationRestoreType, + } + if snapshotName != nil { + appRestoreConfig["snapshotName"] = snapshotName + } flinkAppDesired.Resource.Object = map[string]interface{}{ "apiVersion": "kinesisanalyticsv2.aws.upbound.io/v1beta1", @@ -168,10 +246,7 @@ func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) }), }), "runConfiguration": arrayWithMap(map[string]interface{}{ - "applicationRestoreConfiguration": arrayWithMap(map[string]interface{}{ - "applicationRestoreType": applicationRestoreType, - "snapshotName": snapshotName, - }), + "applicationRestoreConfiguration": arrayWithMap(appRestoreConfig), "flinkRunConfiguration": arrayWithMap(map[string]interface{}{ "allowNonRestoredState": allowNonRestoredState, }), @@ -184,31 +259,293 @@ func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) }), }, "providerConfigRef": map[string]interface{}{ - "name": "aws-provider", + "name": "provider-aws", }, }, } - // Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. Don't set startApplication in the MR until the - // resource is READY, and continue to set it after it becomes RUNNING. - composed, _ := request.GetObservedComposedResources(req) - observedFlink, ok := composed[FLINK_APP_RESOURCE_NAME] + if delayedStart == true { + // Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. 
Don't set startApplication in the MR until a few minutes
+		// after the resource becomes READY
+		observedFlink, ok := observed[FLINK_APP_RESOURCE_NAME]
+		if ok {
+			managedFlinkUpdateLoopWorkaround(flinkAppDesired, &observedFlink, oxr, log)
+		}
+	} else if startApplication == true {
+		log.Info("Setting desiredFlink.spec.forProvider.startApplication=true")
+		flinkAppDesired.Resource.SetValue("spec.forProvider.startApplication", true)
+	}
+
+	return nil // No error == Success
+}
+
+const WA1419_COUNTER_PATH string = "status.wa1419.reqCounter"
+const WA1419_READYAT_PATH string = "status.wa1419.readyAt"
+const WA1419_STARTAPP_PATH string = "status.wa1419.startApplication"
+
+func managedFlinkUpdateLoopWorkaround(desiredFlink *resource.DesiredComposed, observedFlink *resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) {
+	// Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. Don't set startApplication in the MR until a few minutes
+	// after the resource becomes READY
+	v, err := observedFlink.Resource.GetValue("status.atProvider.status")
+	if err != nil { // if atProvider.status is unavailable, then there is nothing to do
+		log.Info("observed.status.atProvider.status is unavailable")
+		return
+	}
+	log.Info("observed.status.atProvider", "status", v)
+
+	var readyAt int64
+	ra, _ := getValue(oxr, WA1419_READYAT_PATH, int64(0))
+	raFloat64, ok := ra.(float64)
 	if ok {
-		v, err := observedFlink.Resource.GetValue("status.atProvider.status")
-		if err == nil && (v == "READY" || v == "RUNNING") {
-			flinkAppDesired.Resource.SetValue("spec.forProvider.startApplication", true)
+		readyAt = int64(raFloat64)
+	} else {
+		raInt64, ok := ra.(int64)
+		if ok {
+			readyAt = raInt64
+		} else {
+			readyAt = int64(0)
+		}
+	}
+
+	if v == "READY" {
+		log.Info(fmt.Sprintf("Got oxr.%s=%d", WA1419_READYAT_PATH, readyAt))
+		if readyAt == 0 {
+			readyAt = time.Now().UnixMilli()
+			oxr.Resource.SetValue(WA1419_READYAT_PATH, readyAt)
+			log.Info(fmt.Sprintf("Set oxr.%s=%d", WA1419_READYAT_PATH, readyAt))
+		}
+	}
+
+	if readyAt > 0 {
+		nowMillis := time.Now().UnixMilli()
+		diffMillis := nowMillis - readyAt
+		// The default must be an int64 so the type assertion below also succeeds when the claim omits the field
+		ds, _ := getValue(oxr, "spec.resourceConfig.delayStartBySeconds", int64(120)) // 2 minutes by default
+		log.Info("Timestamps", "nowMillis", nowMillis, "readyAtMillis", readyAt, "diff", diffMillis, "delayStartBySeconds", ds)
+		delayStartBySeconds, ok := ds.(int64)
+		if ok && diffMillis >= (delayStartBySeconds*time.Second.Milliseconds()) {
+			log.Info("Setting desiredFlink.spec.forProvider.startApplication=true")
+			desiredFlink.Resource.SetValue("spec.forProvider.startApplication", true)
 		}
 	}
+}
+
+func GenerateLogGroup(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error {
+	logGroupDesired := resource.NewDesiredComposed()
+	desired[LOG_GROUP_RESOURCE_NAME] = logGroupDesired
+
+	// Fetch optional values from oxr.spec.resourceConfig
+	region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2")
+	logGroupName, _ := getValue(oxr, "spec.resourceConfig.logGroupName", oxr.Resource.GetClaimReference().Name+"-log-group")
+	retentionInDays, _ := getValue(oxr, "spec.resourceConfig.logRetentionInDays", 7)
+
+	logGroupDesired.Resource.Object = map[string]interface{}{
+		"apiVersion": "cloudwatchlogs.aws.upbound.io/v1beta1",
+		"kind":       "Group",
+		"metadata": map[string]interface{}{
+			"name": logGroupName,
+		},
+		"spec": map[string]interface{}{
+			// "Delete" removes the log group along with the claim; "Orphan" would keep it for post-mortems
+			"deletionPolicy": "Delete", // "Orphan",
+			"forProvider": map[string]interface{}{
+				"region":          region,
+				"retentionInDays": retentionInDays,
+			},
+			"providerConfigRef": map[string]interface{}{
+				"name": "provider-aws",
+			},
+		},
+	}
+	return nil // No error == Success
+}
+
+func GenerateLogStream(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error {
+	logStreamDesired := resource.NewDesiredComposed()
+	desired[LOG_STREAM_RESOURCE_NAME] = logStreamDesired
+
+	// Fetch optional values from oxr.spec.resourceConfig
+	region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2")
+	logStreamName, _ := getValue(oxr, "spec.resourceConfig.logStreamName", oxr.Resource.GetClaimReference().Name+"-log-stream")
+
+	logStreamDesired.Resource.Object = map[string]interface{}{
+		"apiVersion": "cloudwatchlogs.aws.upbound.io/v1beta1",
+		"kind":       "Stream",
+		"metadata": map[string]interface{}{
+			"name": logStreamName,
+		},
+		"spec": map[string]interface{}{
+			"deletionPolicy": "Delete", // "Orphan",
+			"forProvider": map[string]interface{}{
+				"region": region,
+				"name":   logStreamName,
+				"logGroupNameSelector": map[string]interface{}{
+					"matchControllerRef": true,
+				},
+			},
+			"providerConfigRef": map[string]interface{}{
+				"name": "provider-aws",
+			},
+		},
+	}
+	return nil // No error == Success
+}
+
+func GenerateRole(rsp *fnv1.RunFunctionResponse, envConfig *unstructured.Unstructured, desired map[resource.Name]*resource.DesiredComposed,
+	observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error {
+	roleDesired := resource.NewDesiredComposed()
+	desired[ROLE_RESOURCE_NAME] = roleDesired
+
+	roleName := oxr.Resource.GetClaimReference().Name + "-role"
+
+	// Fetch optional values from oxr.spec.resourceConfig
+	region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2")
+	logGroupName, _ := getValue(oxr, "spec.resourceConfig.logGroupName", oxr.Resource.GetClaimReference().Name+"-log-group")
+	additionalManagedPolicyArns, err := getArrayValue(oxr, "spec.resourceConfig.additionalPermissions.managedPolicyArns", []interface{}{})
+	if err != nil {
+		response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.additionalPermissions.managedPolicyArns from %T", oxr))
+		return err
+	}
+	additionalInlinePolicies, err := getArrayValue(oxr, "spec.resourceConfig.additionalPermissions.inlinePolicies", []interface{}{})
+	if err != nil {
+		response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.additionalPermissions.inlinePolicies from %T", oxr))
+		return err
+	}
+
+	assumeRolePolicy := `{
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Effect": "Allow",
+          "Principal": {
+            "Service": "kinesisanalytics.amazonaws.com"
+          },
+          "Action": "sts:AssumeRole"
+        }
+      ]
+    }
+    `
+
+	managedPolicyArnCount := 2 + len(additionalManagedPolicyArns)
+
+	managedPolicyArns := make([]interface{}, managedPolicyArnCount)
+	managedPolicyArns[0] = "arn:aws:iam::aws:policy/AmazonS3FullAccess"
+	managedPolicyArns[1] = "arn:aws:iam::aws:policy/CloudWatchFullAccess"
+	for i, v := range additionalManagedPolicyArns {
+		managedPolicyArns[i+2] = v
+	}
+
+	inlinePolicyCount := 2 + len(additionalInlinePolicies)
+	inlinePolicy := make([]map[string]interface{}, inlinePolicyCount)
+
+	awsAccountID, ok := envConfig.Object["awsAccountID"]
+	if !ok {
+		err := errors.Errorf("Cannot get awsAccountID from envConfig %T", envConfig)
+		response.Fatal(rsp, err)
+		return err
+	}
+
+	// Scope the logging permissions to the log group created by this composition
+	logGroupArn := fmt.Sprintf("arn:aws:logs:%s:%s:log-group:%s", region, awsAccountID, logGroupName)
+
+	inlinePolicy[0] = map[string]interface{}{
+		"name": "logs_policy",
+		"policy": fmt.Sprintf(`{
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Effect": "Allow",
+          "Resource": [ "%s" ],
+          "Action": [
+            "logs:DescribeLogGroups",
+            "logs:DescribeLogStreams",
+            "logs:PutLogEvents"
+          ]
+        }
+      ]
+    }`, logGroupArn),
+	}
+	inlinePolicy[1] = map[string]interface{}{
+		"name": "metrics_policy",
+		"policy": `{
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Effect": "Allow",
+          "Resource": "*",
+          "Action": [
+            "cloudwatch:PutMetricData"
+          ]
+        }
+      ]
+    }`,
+	}
+	for i, v := range additionalInlinePolicies {
+		m, ok := v.(map[string]interface{})
+		if ok {
+			inlinePolicy[i+2] = m
+		} else {
+			message := fmt.Sprintf("Entry at spec.resourceConfig.additionalPermissions.inlinePolicies[%d] is not a map", i)
+			err := errors.New(message)
+			response.Fatal(rsp, err)
+			return err
+		}
+	}
+
+	roleDesired.Resource.Object = map[string]interface{}{
+		"apiVersion": "iam.aws.upbound.io/v1beta1",
+		"kind":       "Role",
+		"metadata": map[string]interface{}{
+			"name": roleName,
+		},
+		"spec": map[string]interface{}{
+			"deletionPolicy": "Delete", // "Orphan",
+			"forProvider": map[string]interface{}{
+				"assumeRolePolicy":  assumeRolePolicy,
+				"managedPolicyArns": managedPolicyArns,
+				"inlinePolicy":      arrayWithMaps(inlinePolicy),
+			},
+			"providerConfigRef": map[string]interface{}{
+				"name": "provider-aws",
+			},
+		},
+	}
+	return nil // No error == Success
+}
+
+// RunFunction runs the Function.
+func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) {
+	rsp := response.To(req, response.DefaultTTL)
+
+	/*
+		reqYaml, err := yaml.Marshal(req)
+		if err != nil {
+			response.Fatal(rsp, errors.Wrapf(err, "cannot marshal req to YAML %T", req))
+		}
+		f.log.Info("Request", "YAML", string(reqYaml))
+	*/
+
+	oxr, err := request.GetObservedCompositeResource(req)
+	if err != nil {
+		response.Fatal(rsp, errors.Wrapf(err, "Cannot get observed XR from %T", req))
+		return rsp, nil
+	}
+
+	metadataName, _ := getValue(oxr, "metadata.name", "nil")
+	f.log.Info("Running function", "metadata.name", metadataName)
+
+	desired, err := request.GetDesiredComposedResources(req)
+	if err != nil {
+		response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req))
+		return rsp, nil
+	}
 
-	f.log.Info("response.Normal(rsp)")
+	RenderManagedFlinkResources(req, rsp, oxr, f.log)
 
 	if err := response.SetDesiredComposedResources(rsp, desired); err != nil {
 		response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composed resources in %T", rsp))
 		return rsp, err
 	}
 
-	response.Normal(rsp, "response.Normal(rsp)")
-	f.log.Info("response.Normal(rsp)")
+	response.Normalf(rsp, "Normal response for metadata.name=%s", metadataName)
+	f.log.Info("Normal response", "metadata.name", metadataName)
 
 	// You can set a custom status condition on the claim. This allows you to
 	// communicate with the user. See the link below for status condition
diff --git a/aws-crossplane/xfn/fn_test.go b/aws-crossplane/xfn/fn_test.go
index 2a573c1..ff3d3db 100644
--- a/aws-crossplane/xfn/fn_test.go
+++ b/aws-crossplane/xfn/fn_test.go
@@ -6,7 +6,8 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
-//	"google.golang.org/protobuf/testing/protocmp"
+
+	// "google.golang.org/protobuf/testing/protocmp"
 	"google.golang.org/protobuf/types/known/durationpb"
 
 	"github.com/crossplane/crossplane-runtime/pkg/logging"
@@ -52,9 +53,21 @@ func TestRunFunction(t *testing.T) {
 					"spec": {
 						"resourceConfig": {
 							"region": "us-east-2",
+							"account": "000000000000",
 							"name": "flink-test",
 							"codeBucket": "flink-test-bucket",
 							"codeFile": "flink-test-app.jar",
+							"additionalPermissions": {
+								"managedPolicyArns": [
+									"arn:aws:iam::aws:policy/AmazonKinesisFullAccess"
+								],
+								"inlinePolicies": [
+									{
+										"name": "kinesis_policy",
+										"policy": " {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress\",\n \"arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress\"\n ],\n \"Action\": [\n \"kinesis:DescribeStream\",\n \"kinesis:GetRecords\",\n \"kinesis:GetShardIterator\",\n \"kinesis:ListShards\",\n \"kinesis:PutRecord\"\n ]\n }\n ]\n }\n"
+									}
+								]
+							},
 							"environmentProperties": [{
 								"propertyGroup": [{
 									"propertyGroupId": "StatefunApplicationProperties",
@@ -103,7 +116,7 @@ func TestRunFunction(t *testing.T) {
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
 			f := &Function{log: logging.NewNopLogger()}
-			_,/*rsp,*/ err := f.RunFunction(tc.args.ctx, tc.args.req)
+			_ /*rsp,*/, err := f.RunFunction(tc.args.ctx, tc.args.req)
 
 			// if diff := cmp.Diff(tc.want.rsp, rsp, protocmp.Transform()); diff != "" {
 			// 	t.Errorf("%s\nf.RunFunction(...): -want rsp, +got rsp:\n%s", tc.reason, diff)

From fcfa04b099e6b906df720a36430f19fcf0918e51 Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Thu, 16 Jan 2025 15:45:33 -1000
Subject: [PATCH 24/28] Use function-auto-ready for the ready status in the
 managed flink XR
---
 aws-crossplane/claims/managed-flink-claim.yaml    | 11 ++++++++++-
 aws-crossplane/local/aws/manifests/functions.yaml |  7 +++++++
 .../local/localstack/configs/functions.yaml       |  7 +++++++
 aws-crossplane/resources/flink/flink-comp.yaml    |  3 +++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml
index 80e61e7..71f8d23 100644
--- a/aws-crossplane/claims/managed-flink-claim.yaml
+++ b/aws-crossplane/claims/managed-flink-claim.yaml
@@ -5,7 +5,16 @@ metadata:
   namespace: default
 spec:
   resourceConfig:
-    startApplication: true
+    # OPTION 1, enable delayedStart and optionally override delayStartBySeconds (defaults to 120). Do
+    # not set startApplication to true if you enable delayedStart
+    delayedStart: false
+    delayStartBySeconds: 120
+
+    # OPTION 2, enable startApplication=true sometime after the resource is created and in the Ready state
+    # startApplication: true
+
+    # In either case, above, cross your fingers that you don't get the Running/Updating loop of death :)
+
     codeBucket: flink-cp-demo-bucket
     codeFile: my-stateful-functions-embedded-java-3.3.0.jar
     additionalPermissions:
diff --git a/aws-crossplane/local/aws/manifests/functions.yaml b/aws-crossplane/local/aws/manifests/functions.yaml
index 837df6c..cedea8a 100644
--- a/aws-crossplane/local/aws/manifests/functions.yaml
+++ b/aws-crossplane/local/aws/manifests/functions.yaml
@@ -8,6 +8,13 @@ spec:
 ---
 apiVersion: pkg.crossplane.io/v1
 kind: Function
+metadata:
+  name: function-auto-ready
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.4.0
+---
+apiVersion: pkg.crossplane.io/v1
+kind: Function
 metadata:
   name: function-patch-and-transform
 spec:
diff --git a/aws-crossplane/local/localstack/configs/functions.yaml b/aws-crossplane/local/localstack/configs/functions.yaml
index 837df6c..cedea8a 100644
--- a/aws-crossplane/local/localstack/configs/functions.yaml
+++ b/aws-crossplane/local/localstack/configs/functions.yaml
@@ -8,6 +8,13 @@ spec:
 ---
 apiVersion: pkg.crossplane.io/v1
 kind: Function
+metadata:
+  name: function-auto-ready
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.4.0
+---
+apiVersion: pkg.crossplane.io/v1
+kind: Function
 metadata:
   name: function-patch-and-transform
 spec:
diff --git a/aws-crossplane/resources/flink/flink-comp.yaml b/aws-crossplane/resources/flink/flink-comp.yaml
index 9880321..fd6abfd 100644
--- a/aws-crossplane/resources/flink/flink-comp.yaml
+++ b/aws-crossplane/resources/flink/flink-comp.yaml
@@ -23,3 +23,6 @@ spec:
     - step: render
       functionRef:
         name: function-managed-flink
+    - step: auto-ready-composite-resource # Auto-sets ready on the XR when the composed resources are ready
+      functionRef:
+        name: function-auto-ready

From 5995737b7b3d439f2b50526650a35f437d3baaa5 Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Tue, 21 Jan 2025 07:13:42 -1000
Subject: [PATCH 25/28] Enable debug output for the aws kinesisanalyticsv2
 provider
---
 README.md                                      |  1 +
 aws-crossplane/claims/managed-flink-claim.yaml |  2 +-
 .../local/aws/manifests/aws-services.yaml      | 18 ++++++++++++++++++
 aws-terraform/main.tf                          |  2 +-
 4 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 4dc899d..1f716a5 100644
--- a/README.md
+++ b/README.md
@@ -268,6 +268,7 @@
 - idpbuilder (https://github.com/cnoe-io/idpbuilder)
 - kubectl
 - jq
+- Go
 
 #### Introduction
 
 This demo demonstrates that it is possible to provision and run an AWS Managed Flink application via Crossplane. Many
Many
diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml
index 71f8d23..3e6abe6 100644
--- a/aws-crossplane/claims/managed-flink-claim.yaml
+++ b/aws-crossplane/claims/managed-flink-claim.yaml
@@ -11,7 +11,7 @@ spec:
     delayStartBySeconds: 120

     # OPTION 2, enable startApplication=true sometime after the resource is created and in the Ready state
-    # startApplication: true
+    startApplication: true

     # In either case, above, cross your fingers that you don't get the Running/Updating loop of death :)

diff --git a/aws-crossplane/local/aws/manifests/aws-services.yaml b/aws-crossplane/local/aws/manifests/aws-services.yaml
index 1c0ce13..4ab97ed 100644
--- a/aws-crossplane/local/aws/manifests/aws-services.yaml
+++ b/aws-crossplane/local/aws/manifests/aws-services.yaml
@@ -1,3 +1,17 @@
+apiVersion: pkg.crossplane.io/v1beta1
+kind: DeploymentRuntimeConfig
+metadata:
+  name: debug-config
+spec:
+  deploymentTemplate:
+    spec:
+      selector: {}
+      template:
+        spec:
+          containers:
+            - name: package-runtime
+              args:
+                - --debug
 ---
 apiVersion: pkg.crossplane.io/v1
 kind: Provider
@@ -34,6 +48,10 @@ spec:
   package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0
   controllerConfigRef:
     name: aws-config
+  runtimeConfigRef:
+    apiVersion: pkg.crossplane.io/v1beta1
+    kind: DeploymentRuntimeConfig
+    name: debug-config
 ---
 apiVersion: pkg.crossplane.io/v1
 kind: Provider
diff --git a/aws-terraform/main.tf b/aws-terraform/main.tf
index 9c483e8..a8dcff9 100644
--- a/aws-terraform/main.tf
+++ b/aws-terraform/main.tf
@@ -185,7 +185,7 @@ resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" {
     code_content {
       s3_content_location {
         bucket_arn = aws_s3_bucket.flink_demo_bucket.arn
-        file_key   = "my-stateful-functions-embedded-java-3.3.0.jar.1"
+        file_key   = "my-stateful-functions-embedded-java-3.3.0.jar"
       }
     }
     code_content_type = "ZIPFILE"

From 232cdd93a2743522e0fbaf686d342e4a8defbe1e Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Tue, 21 Jan 2025 13:00:42 -1000
Subject: [PATCH 26/28] Remove setting of cloudWatchLoggingOptions and previous update loop workaround

---
 README.md                                     | 14 ++--
 .../claims/managed-flink-claim.yaml           | 10 ---
 aws-crossplane/launch-and-config-idp.sh       |  7 ++
 aws-crossplane/resources/flink/flink-xrd.yaml | 18 -----
 aws-crossplane/xfn/fn.go                      | 79 +++----------------
 5 files changed, 22 insertions(+), 106 deletions(-)

diff --git a/README.md b/README.md
index 1f716a5..fe6ddd3 100644
--- a/README.md
+++ b/README.md
@@ -333,25 +333,21 @@ aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_b

 ##### Provision the Managed Flink application

-Apply the following claim to trigger the creation of the Flink application, its role, and log groups. Note that by
-default Flink application will become 'Ready' since `startApplication: true` is commented-out in the claim. Do not
-uncomment this line yet.
+Apply the following claim to trigger the creation of the Flink application, its role, log group, and log stream.
+Note that, at the time of this writing, the Flink application is not configured with the log stream; this works around
+a bug in the Crossplane provider (https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419).

 ```
 kubectl apply -f claims/managed-flink-claim.yaml
 ```

-Visit the AWS Managed Flink applications page in the web console.
When the application status becomes `Ready`, -uncomment the `startAppication: true` line in the `managed-flink-claim.yaml` file and re-run -the `kubectl apply -f claims/managed-flink-claim.yaml` command. If the initial claim apply is performed -with `startApplication: true` then Crossplane appears to go into a loop where it updates the application every few -minutes, and so it switches back and forth between `Running` and `Updating` :( - Wait until the Flink application is in the 'Running' state. This may take a few minutes. #### Monitor the CloudWatch logging output +See the note above re: logging config. Until the bug is fixed, no log output will be available. + The following script will show all the log entries from the start of application launch, and will wait for new entries to arrive and display them too. The script will resume from where it left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory. diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml index 3e6abe6..4744a9b 100644 --- a/aws-crossplane/claims/managed-flink-claim.yaml +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -5,16 +5,6 @@ metadata: namespace: default spec: resourceConfig: - # OPTION 1, enable delayedStart and optionally override delayedStartBySeconds (defaults to 120). Do - # not set startApplication to true if you enable delayedStart - delayedStart: false - delayStartBySeconds: 120 - - # OPTION 2, enable startApplication=true sometime after the resource is created and in the Ready state - startApplication: true - - # In either case, above, cross your fingers that you don't get the Running/Updating loop of death :) - codeBucket: flink-cp-demo-bucket codeFile: my-stateful-functions-embedded-java-3.3.0.jar additionalPermissions: diff --git a/aws-crossplane/launch-and-config-idp.sh b/aws-crossplane/launch-and-config-idp.sh index 5e2acea..acb865d 100755 --- a/aws-crossplane/launch-and-config-idp.sh +++ b/aws-crossplane/launch-and-config-idp.sh @@ -40,6 +40,13 @@ function main() { wait_for_pods crossplane-system provider-aws echo + + echo "Waiting for the Crossplane AWS provider configs to be ready..." 
+ until [[ $(kubectl get providerconfigs 2>&1 | grep aws | wc -l) -eq 2 ]]; do + sleep 2 + done + echo + echo "Loading the Crossplane Composite Resource Definitions and Compositions" for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do kubectl apply -f $i diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml index de6328f..cf6042b 100644 --- a/aws-crossplane/resources/flink/flink-xrd.yaml +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -30,12 +30,6 @@ spec: type: string codeFile: type: string - delayedStart: - type: boolean - delayStartBySeconds: - type: integer - startApplication: - type: boolean runtime: type: string snapshotsEnabled: @@ -100,15 +94,3 @@ spec: - environmentProperties required: - resourceConfig - status: - type: object - properties: - wa1419: - description: > - The object at 'status.wa1419' contains properties maintained by the composition function related - to the workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419" - type: object - properties: - readyAt: - type: integer - format: int64 diff --git a/aws-crossplane/xfn/fn.go b/aws-crossplane/xfn/fn.go index cdb8aa5..1160b50 100644 --- a/aws-crossplane/xfn/fn.go +++ b/aws-crossplane/xfn/fn.go @@ -3,7 +3,6 @@ package main import ( "context" "fmt" - "time" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -158,8 +157,7 @@ func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Na // Fetch optional values from oxr.spec.resourceConfig region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") - delayedStart, _ := getValue(oxr, "spec.resourceConfig.delayedStart", false) - startApplication, _ := getValue(oxr, "spec.resourceConfig.startApplication", false) + startApplication, _ := getValue(oxr, "spec.resourceConfig.startApplication", true) runtimeEnvironment, _ := getValue(oxr, "spec.resourceConfig.runtime", "FLINK-1_18") snapshotsEnabled, _ := getValue(oxr, "spec.resourceConfig.snapshotsEnabled", true) checkpointingEnabled, _ := getValue(oxr, "spec.resourceConfig.checkpointingEnabled", true) @@ -206,6 +204,7 @@ func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Na "region": region, "runtimeEnvironment": runtimeEnvironment, // "FLINK-1_18", "applicationMode": "STREAMING", + "startApplication": startApplication, "serviceExecutionRoleSelector": map[string]interface{}{ "matchControllerRef": true, }, @@ -252,11 +251,14 @@ func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Na }), }), }), - "cloudwatchLoggingOptions": arrayWithMap(map[string]interface{}{ - "logStreamArnSelector": map[string]interface{}{ - "matchControllerRef": true, - }, - }), + // NOTE: For now, don't set cloudWatchLoggingOptions as a workaround for the endless + // Updating loop (https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419) + + // "cloudwatchLoggingOptions": arrayWithMap(map[string]interface{}{ + // "logStreamArnSelector": map[string]interface{}{ + // "matchControllerRef": true, + // }, + // }), }, "providerConfigRef": map[string]interface{}{ "name": "provider-aws", @@ -264,70 +266,9 @@ func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Na }, } - if delayedStart == true { - // Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. 
Don't set startApplication in the MR until a few minutes
- // after the resource becomes READY
- observedFlink, ok := observed[FLINK_APP_RESOURCE_NAME]
- if ok {
- managedFlinkUpdateLoopWorkaround(flinkAppDesired, &observedFlink, oxr, log)
- }
- } else if startApplication == true {
- log.Info("Setting desiredFlink.spec.forProvider.startApplication=true")
- flinkAppDesired.Resource.SetValue("spec.forProvider.startApplication", true)
- }
-
 return nil // No error == Success
 }

-const WA1419_COUNTER_PATH string = "status.wa1419.reqCounter"
-const WA1419_READYAT_PATH string = "status.wa1419.readyAt"
-const WA1419_STARTAPP_PATH string = "status.wa1419.startApplication"
-
-func managedFlinkUpdateLoopWorkaround(desiredFlink *resource.DesiredComposed, observedFlink *resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) {
- // Workaround for https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419. Don't set startApplication in the MR until a few minutes
- // after the resource becomes READY
- v, err := observedFlink.Resource.GetValue("status.atProvider.status")
- if err != nil { // if atProvider.status is unavailable, then there is nothing to do
- log.Info("observed.status.atProvider.status is unavailable")
- return
- }
- log.Info("observed.status.atProvider", "status", v)
-
- var readyAt int64
- ra, _ := getValue(oxr, WA1419_READYAT_PATH, int64(0))
- raFloat64, ok := ra.(float64)
- if ok {
- readyAt = int64(raFloat64)
- } else {
- raInt64, ok := ra.(int64)
- if ok {
- readyAt = raInt64
- } else {
- readyAt = int64(0)
- }
- }
-
- if v == "READY" {
- log.Info(fmt.Sprintf("Got oxr.%s=%d", WA1419_READYAT_PATH, readyAt))
- if readyAt == 0 {
- readyAt = time.Now().UnixMilli()
- oxr.Resource.SetValue(WA1419_READYAT_PATH, readyAt)
- log.Info(fmt.Sprintf("Set oxr.%s=%d", WA1419_READYAT_PATH, readyAt))
- }
- }
-
- if readyAt > 0 {
- nowMillis := time.Now().UnixMilli()
- diffMillis := nowMillis - readyAt
- ds, _ := getValue(oxr, "spec.resourceConfig.delayStartBySeconds", 120) // 2 minutes by default
- log.Info("Timestamps", "nowMillis", nowMillis, "readyAtMillis", readyAt, "diff", diffMillis, "delayStartBySeconds", ds)
- delayStartBySeconds, ok := ds.(int64)
- if ok && diffMillis >= (delayStartBySeconds*time.Second.Milliseconds()) {
- log.Info("Setting desiredFlink.spec.forProvider.startApplication=true")
- desiredFlink.Resource.SetValue("spec.forProvider.startApplication", true)
- }
- }
-}

 func GenerateLogGroup(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error {
 logGroupDesired := resource.NewDesiredComposed()

From 86482dde830702fc4d855144447ded000b247e41 Mon Sep 17 00:00:00 2001
From: Ken Ellinwood
Date: Tue, 8 Apr 2025 14:34:07 -1000
Subject: [PATCH 27/28] Minor fixes, try latest upbound/provider version

---
 README.md                                             |  9 +++++++--
 aws-crossplane/launch-and-config-idp.sh               |  2 +-
 aws-crossplane/local/aws/manifests/aws-services.yaml  | 10 +++++-----
 .../com/example/stateful_functions/Configuration.java |  2 --
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index fe6ddd3..be02003 100644
--- a/README.md
+++ b/README.md
@@ -372,10 +372,15 @@ wait for new entries to arrive and display them too.

 Manually delete the files in the S3 bucket, and delete the Kinesis stream `flink-cp-demo-ingress` (the Flink
 application adds a fanout consumer to the stream which will block any deletion attempted by Crossplane).
+
```
aws s3 rm --recursive s3://$(aws s3 ls | grep flink-cp-demo | awk '{print $3}')
aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-cp-demo-ingress
```
+
Run the following commands to delete the remaining resources:
```
-kubectl delete -f resources/claims/managed-flink-claims.yaml
-kubectl delete -f resources/claims/demo-setup-claims.yaml
+kubectl delete -f claims/managed-flink-claim.yaml
+kubectl delete -f claims/demo-setup-claims.yaml
```

 Shut down the local IDP with the command:
diff --git a/aws-crossplane/launch-and-config-idp.sh b/aws-crossplane/launch-and-config-idp.sh
index acb865d..4860a00 100755
--- a/aws-crossplane/launch-and-config-idp.sh
+++ b/aws-crossplane/launch-and-config-idp.sh
@@ -28,7 +28,7 @@ function main() {
     echo
   fi

-  echo "Waiting for getea to be ready..."
+  echo "Waiting for gitea to be ready..."
   wait_for_pods gitea my-gitea
   echo

diff --git a/aws-crossplane/local/aws/manifests/aws-services.yaml b/aws-crossplane/local/aws/manifests/aws-services.yaml
index 4ab97ed..53c4979 100644
--- a/aws-crossplane/local/aws/manifests/aws-services.yaml
+++ b/aws-crossplane/local/aws/manifests/aws-services.yaml
@@ -18,7 +18,7 @@ kind: Provider
 metadata:
   name: provider-aws-s3
 spec:
-  package: xpkg.upbound.io/upbound/provider-aws-s3:v1.17.0
+  package: xpkg.upbound.io/upbound/provider-aws-s3:v1.21.1
   controllerConfigRef:
     name: aws-config
 ---
@@ -27,7 +27,7 @@ kind: Provider
 metadata:
   name: provider-aws-iam
 spec:
-  package: xpkg.upbound.io/upbound/provider-aws-iam:v1.17.0
+  package: xpkg.upbound.io/upbound/provider-aws-iam:v1.21.1
   controllerConfigRef:
     name: aws-config
 ---
@@ -36,7 +36,7 @@ kind: Provider
 metadata:
   name: provider-aws-kinesis
 spec:
-  package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.17.0
+  package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.21.1
   controllerConfigRef:
     name: aws-config
 ---
@@ -45,7 +45,7 @@ kind: Provider
 metadata:
   name: provider-aws-kinesisanalyticsv2
 spec:
-  package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0
+  package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.21.1
   controllerConfigRef:
     name: aws-config
   runtimeConfigRef:
@@ -58,6 +58,6 @@ kind: Provider
 metadata:
   name: provider-aws-cloudwatchlogs
 spec:
-  package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.17.0
+  package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.21.1
   controllerConfigRef:
     name: aws-config
diff --git a/src/main/java/com/example/stateful_functions/Configuration.java b/src/main/java/com/example/stateful_functions/Configuration.java
index 7647bdc..5984abb 100644
--- a/src/main/java/com/example/stateful_functions/Configuration.java
+++ b/src/main/java/com/example/stateful_functions/Configuration.java
@@ -1,14 +1,12 @@
 package com.example.stateful_functions;

 import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime;
-import org.apache.flink.kinesis.shaded.com.amazonaws.services.dynamodbv2.xspec.S;
 import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.lang.reflect.Field;
 import java.util.Locale;
-import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Properties;

From dcc87623e1b4c28a2b2081f16a6681b5f8f8eb7e Mon Sep 17 00:00:00 2001
From: NathanTippy
Date: Tue, 22 Apr 2025 17:18:36 -0500
Subject: [PATCH 28/28] added dependencies AWS wants in the jar

---
 pom.xml | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 561ba45..5d820a5
100644
--- a/pom.xml
+++ b/pom.xml
@@ -91,6 +91,12 @@
 
+    <dependency>
+      <groupId>org.apache.flink</groupId>
+      <artifactId>statefun-flink-core</artifactId>
+      <version>${statefun.version}</version>
+    </dependency>
+
     <dependency>
       <groupId>com.amazonaws</groupId>
       <artifactId>aws-kinesisanalytics-runtime</artifactId>
@@ -276,7 +282,8 @@
           ${project.build.directory}/flink-plugins
           false
           true
-          true
+
+          false
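
A quick way to confirm the effect of this last patch before uploading the jar to S3. This is a minimal sketch, not part of the patch series: it assumes the Maven wrapper from this repo, the shaded-jar name used throughout the series, and the usual `org/apache/flink/statefun/flink/core` package layout of `statefun-flink-core`.

```shell
# Rebuild the shaded jar with the newly added dependency.
./mvnw package

# List the jar contents and look for statefun-flink-core classes; if the
# grep finds nothing, the dependency did not make it into the jar and AWS
# Managed Flink will fail at startup with ClassNotFoundException-style errors.
jar tf target/my-stateful-functions-embedded-java-3.3.0.jar \
  | grep -m 5 '^org/apache/flink/statefun/flink/core/' \
  || echo "statefun-flink-core classes are missing from the jar"
```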