diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 993d39f..1c20266 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -4,6 +4,9 @@ aliases: core-ack-team: - a-hilaly - RedbackThomson + - jaypipes - jljaco - # TODO: Add your team members' GitHub aliases to the team alias - service-team: [] + service-team: + - embano1 + - baldawar + - nikp diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index a1fb0d6..452f1a4 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2023-03-10T13:44:08Z" - build_hash: 8d86a782fb08c15cd1e1c6ba82653a6af55e6378 + build_date: "2023-03-14T23:37:52Z" + build_hash: 0888419ec6825035cae1fdee2ceffd7c1ac73ca8 go_version: go1.20 - version: v0.24.2-9-g8d86a78 -api_directory_checksum: bf5e3c94a538d590a95720e9e8c5b4e4e2c87f09 + version: v0.24.3-5-g0888419 +api_directory_checksum: 0501e26102bccaff71f18fddd72d0e48a5fdeeb6 api_version: v1alpha1 aws_sdk_go_version: v1.44.218 generator_config_info: - file_checksum: a5fbd6693bd0f143f3468b7644e6228d3a9a4600 + file_checksum: 43c88522fc802a0f70ed4c695964dfdca1a2fea2 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 55502e1..d66bcda 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -1,5 +1,61 @@ ignore: resource_names: - - Pipe + # - Pipe sdk_names: - model: pipes \ No newline at end of file + client_struct: Pipes + client_interface: PipesAPI +resources: + Pipe: + fields: + DesiredState: + compare: + is_ignored: true + SourceParameters: + compare: + is_ignored: true + TargetParameters: + compare: + is_ignored: true + EnrichmentParameters: + compare: + is_ignored: true + StateReason: + is_read_only: true + from: + operation: DescribePipe + path: StateReason + hooks: + delta_pre_compare: + code: customPreCompare(delta, a, b) + 
sdk_create_post_set_output: + template_path: hooks/pipe/sdk_create_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/pipe/sdk_update_pre_build_request.go.tpl + sdk_update_pre_set_output: + template_path: hooks/pipe/sdk_update_pre_set_output.go.tpl + sdk_update_post_build_request: + template_path: hooks/pipe/sdk_update_post_build_request.go.tpl + sdk_delete_post_request: + template_path: hooks/pipe/sdk_delete_post_request.go.tpl + print: + add_age_column: true + add_synced_column: true + additional_columns: + - name: ARN + json_path: .status.ackResourceMetadata.arn + type: string + priority: 1 + - name: SOURCE + json_path: .spec.source + type: string + priority: 1 + - name: TARGET + json_path: .spec.target + type: string + priority: 1 + - name: STATE + json_path: .status.currentState + type: string + exceptions: + terminal_codes: + - ValidationException diff --git a/apis/v1alpha1/pipe.go b/apis/v1alpha1/pipe.go new file mode 100644 index 0000000..d20440f --- /dev/null +++ b/apis/v1alpha1/pipe.go @@ -0,0 +1,112 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PipeSpec defines the desired state of Pipe. +// +// An object that represents a pipe. 
Amazon EventBridgePipes connect event sources +// to targets and reduces the need for specialized knowledge and integration +// code. +type PipeSpec struct { + + // A description of the pipe. + Description *string `json:"description,omitempty"` + // The state the pipe should be in. + DesiredState *string `json:"desiredState,omitempty"` + // The ARN of the enrichment resource. + Enrichment *string `json:"enrichment,omitempty"` + // The parameters required to set up enrichment on your pipe. + EnrichmentParameters *PipeEnrichmentParameters `json:"enrichmentParameters,omitempty"` + // The name of the pipe. + // +kubebuilder:validation:Required + Name *string `json:"name"` + // The ARN of the role that allows the pipe to send data to the target. + // +kubebuilder:validation:Required + RoleARN *string `json:"roleARN"` + // The ARN of the source resource. + // +kubebuilder:validation:Required + Source *string `json:"source"` + // The parameters required to set up a source for your pipe. + SourceParameters *PipeSourceParameters `json:"sourceParameters,omitempty"` + // The list of key-value pairs to associate with the pipe. + Tags map[string]*string `json:"tags,omitempty"` + // The ARN of the target resource. + // +kubebuilder:validation:Required + Target *string `json:"target"` + // The parameters required to set up a target for your pipe. 
+ TargetParameters *PipeTargetParameters `json:"targetParameters,omitempty"` +} + +// PipeStatus defines the observed state of Pipe +type PipeStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRS managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // The time the pipe was created. + // +kubebuilder:validation:Optional + CreationTime *metav1.Time `json:"creationTime,omitempty"` + // The state the pipe is in. + // +kubebuilder:validation:Optional + CurrentState *string `json:"currentState,omitempty"` + // When the pipe was last updated, in ISO-8601 format (https://www.w3.org/TR/NOTE-datetime) + // (YYYY-MM-DDThh:mm:ss.sTZD). + // +kubebuilder:validation:Optional + LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` + // The reason the pipe is in its current state. 
+ // +kubebuilder:validation:Optional + StateReason *string `json:"stateReason,omitempty"` +} + +// Pipe is the Schema for the Pipes API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="ARN",type=string,priority=1,JSONPath=`.status.ackResourceMetadata.arn` +// +kubebuilder:printcolumn:name="SOURCE",type=string,priority=1,JSONPath=`.spec.source` +// +kubebuilder:printcolumn:name="STATE",type=string,priority=0,JSONPath=`.status.currentState` +// +kubebuilder:printcolumn:name="TARGET",type=string,priority=1,JSONPath=`.spec.target` +// +kubebuilder:printcolumn:name="Synced",type="string",priority=0,JSONPath=".status.conditions[?(@.type==\"ACK.ResourceSynced\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",priority=0,JSONPath=".metadata.creationTimestamp" +type Pipe struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PipeSpec `json:"spec,omitempty"` + Status PipeStatus `json:"status,omitempty"` +} + +// PipeList contains a list of Pipe +// +kubebuilder:object:root=true +type PipeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Pipe `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Pipe{}, &PipeList{}) +} diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index 9b23f5e..634459c 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -27,3 +27,635 @@ var ( _ = &aws.JSONValue{} _ = ackv1alpha1.AWSAccountID("") ) + +// This structure specifies the VPC subnets and security groups for the task, +// and whether a public IP address is to be used. This structure is relevant +// only for ECS tasks that use the awsvpc network mode. 
+type AWSVPCConfiguration struct { + AssignPublicIP *string `json:"assignPublicIP,omitempty"` + SecurityGroups []*string `json:"securityGroups,omitempty"` + Subnets []*string `json:"subnets,omitempty"` +} + +// The array properties for the submitted job, such as the size of the array. +// The array size can be between 2 and 10,000. If you specify array properties +// for a job, it becomes an array job. This parameter is used only if the target +// is an Batch job. +type BatchArrayProperties struct { + Size *int64 `json:"size,omitempty"` +} + +// The overrides that are sent to a container. +type BatchContainerOverrides struct { + Command []*string `json:"command,omitempty"` + Environment []*BatchEnvironmentVariable `json:"environment,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` + ResourceRequirements []*BatchResourceRequirement `json:"resourceRequirements,omitempty"` +} + +// The environment variables to send to the container. You can add new environment +// variables, which are added to the container at launch, or you can override +// the existing environment variables from the Docker image or the task definition. +// +// Environment variables cannot start with "Batch". This naming convention is +// reserved for variables that Batch sets. +type BatchEnvironmentVariable struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// An object that represents an Batch job dependency. +type BatchJobDependency struct { + JobID *string `json:"jobID,omitempty"` + Type *string `json:"type_,omitempty"` +} + +// The type and amount of a resource to assign to a container. The supported +// resources include GPU, MEMORY, and VCPU. +type BatchResourceRequirement struct { + Type *string `json:"type_,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The retry strategy that's associated with a job. 
For more information, see +// Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) +// in the Batch User Guide. +type BatchRetryStrategy struct { + Attempts *int64 `json:"attempts,omitempty"` +} + +// The details of a capacity provider strategy. To learn more, see CapacityProviderStrategyItem +// (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CapacityProviderStrategyItem.html) +// in the Amazon ECS API Reference. +type CapacityProviderStrategyItem struct { + Base *int64 `json:"base,omitempty"` + CapacityProvider *string `json:"capacityProvider,omitempty"` + Weight *int64 `json:"weight,omitempty"` +} + +// A DeadLetterConfig object that contains information about a dead-letter queue +// configuration. +type DeadLetterConfig struct { + ARN *string `json:"arn,omitempty"` +} + +// The overrides that are sent to a container. An empty container override can +// be passed in. An example of an empty container override is {"containerOverrides": +// [ ] }. If a non-empty container override is specified, the name parameter +// must be included. +type ECSContainerOverride struct { + Command []*string `json:"command,omitempty"` + CPU *int64 `json:"cpu,omitempty"` + Environment []*ECSEnvironmentVariable `json:"environment,omitempty"` + EnvironmentFiles []*ECSEnvironmentFile `json:"environmentFiles,omitempty"` + Memory *int64 `json:"memory,omitempty"` + MemoryReservation *int64 `json:"memoryReservation,omitempty"` + Name *string `json:"name,omitempty"` + ResourceRequirements []*ECSResourceRequirement `json:"resourceRequirements,omitempty"` +} + +// A list of files containing the environment variables to pass to a container. +// You can specify up to ten environment files. The file must have a .env file +// extension. Each line in an environment file should contain an environment +// variable in VARIABLE=VALUE format. Lines beginning with # are treated as +// comments and are ignored. 
For more information about the environment variable +// file syntax, see Declare default environment variables in file (https://docs.docker.com/compose/env-file/). +// +// If there are environment variables specified using the environment parameter +// in a container definition, they take precedence over the variables contained +// within an environment file. If multiple environment files are specified that +// contain the same variable, they're processed from the top down. We recommend +// that you use unique variable names. For more information, see Specifying +// environment variables (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// This parameter is only supported for tasks hosted on Fargate using the following +// platform versions: +// +// - Linux platform version 1.4.0 or later. +// +// - Windows platform version 1.0.0 or later. +type ECSEnvironmentFile struct { + Type *string `json:"type_,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The environment variables to send to the container. You can add new environment +// variables, which are added to the container at launch, or you can override +// the existing environment variables from the Docker image or the task definition. +// You must also specify a container name. +type ECSEnvironmentVariable struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The amount of ephemeral storage to allocate for the task. This parameter +// is used to expand the total amount of ephemeral storage available, beyond +// the default amount, for tasks hosted on Fargate. For more information, see +// Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) +// in the Amazon ECS User Guide for Fargate. +// +// This parameter is only supported for tasks hosted on Fargate using Linux +// platform version 1.4.0 or later. 
This parameter is not supported for Windows +// containers on Fargate. +type ECSEphemeralStorage struct { + SizeInGiB *int64 `json:"sizeInGiB,omitempty"` +} + +// Details on an Elastic Inference accelerator task override. This parameter +// is used to override the Elastic Inference accelerator specified in the task +// definition. For more information, see Working with Amazon Elastic Inference +// on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html) +// in the Amazon Elastic Container Service Developer Guide. +type ECSInferenceAcceleratorOverride struct { + DeviceName *string `json:"deviceName,omitempty"` + DeviceType *string `json:"deviceType,omitempty"` +} + +// The type and amount of a resource to assign to a container. The supported +// resource types are GPUs and Elastic Inference accelerators. For more information, +// see Working with GPUs on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) +// or Working with Amazon Elastic Inference on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html) +// in the Amazon Elastic Container Service Developer Guide +type ECSResourceRequirement struct { + Type *string `json:"type_,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The overrides that are associated with a task. +type ECSTaskOverride struct { + ContainerOverrides []*ECSContainerOverride `json:"containerOverrides,omitempty"` + CPU *string `json:"cpu,omitempty"` + // The amount of ephemeral storage to allocate for the task. This parameter + // is used to expand the total amount of ephemeral storage available, beyond + // the default amount, for tasks hosted on Fargate. For more information, see + // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + // in the Amazon ECS User Guide for Fargate. 
+ // + // This parameter is only supported for tasks hosted on Fargate using Linux + // platform version 1.4.0 or later. This parameter is not supported for Windows + // containers on Fargate. + EphemeralStorage *ECSEphemeralStorage `json:"ephemeralStorage,omitempty"` + ExecutionRoleARN *string `json:"executionRoleARN,omitempty"` + InferenceAcceleratorOverrides []*ECSInferenceAcceleratorOverride `json:"inferenceAcceleratorOverrides,omitempty"` + Memory *string `json:"memory,omitempty"` + TaskRoleARN *string `json:"taskRoleARN,omitempty"` +} + +// Filter events using an event pattern. For more information, see Events and +// Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) +// in the Amazon EventBridge User Guide. +type Filter struct { + Pattern *string `json:"pattern,omitempty"` +} + +// The collection of event patterns used to filter events. For more information, +// see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) +// in the Amazon EventBridge User Guide. +type FilterCriteria struct { + Filters []*Filter `json:"filters,omitempty"` +} + +// The Secrets Manager secret that stores your broker credentials. +type MQBrokerAccessCredentials struct { + // // Optional SecretManager ARN which stores the database credentials + BasicAuth *string `json:"basicAuth,omitempty"` +} + +// The Secrets Manager secret that stores your stream credentials. +type MSKAccessCredentials struct { + // // Optional SecretManager ARN which stores the database credentials + ClientCertificateTLSAuth *string `json:"clientCertificateTLSAuth,omitempty"` + // // Optional SecretManager ARN which stores the database credentials + SASLSCRAM512Auth *string `json:"saslSCRAM512Auth,omitempty"` +} + +// This structure specifies the network configuration for an Amazon ECS task. 
+type NetworkConfiguration struct { + // This structure specifies the VPC subnets and security groups for the task, + // and whether a public IP address is to be used. This structure is relevant + // only for ECS tasks that use the awsvpc network mode. + AWSVPCConfiguration *AWSVPCConfiguration `json:"awsVPCConfiguration,omitempty"` +} + +// These are custom parameter to be used when the target is an API Gateway REST +// APIs or EventBridge ApiDestinations. In the latter case, these are merged +// with any InvocationParameters specified on the Connection, with any values +// from the Connection taking precedence. +type PipeEnrichmentHTTPParameters struct { + HeaderParameters map[string]*string `json:"headerParameters,omitempty"` + PathParameterValues []*string `json:"pathParameterValues,omitempty"` + QueryStringParameters map[string]*string `json:"queryStringParameters,omitempty"` +} + +// The parameters required to set up enrichment on your pipe. +type PipeEnrichmentParameters struct { + // These are custom parameter to be used when the target is an API Gateway REST + // APIs or EventBridge ApiDestinations. In the latter case, these are merged + // with any InvocationParameters specified on the Connection, with any values + // from the Connection taking precedence. + HTTPParameters *PipeEnrichmentHTTPParameters `json:"httpParameters,omitempty"` + InputTemplate *string `json:"inputTemplate,omitempty"` +} + +// The parameters for using an Active MQ broker as a source. +type PipeSourceActiveMQBrokerParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your broker credentials. + Credentials *MQBrokerAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + QueueName *string `json:"queueName,omitempty"` +} + +// The parameters for using a DynamoDB stream as a source. 
+type PipeSourceDynamoDBStreamParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // A DeadLetterConfig object that contains information about a dead-letter queue + // configuration. + DeadLetterConfig *DeadLetterConfig `json:"deadLetterConfig,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + MaximumRecordAgeInSeconds *int64 `json:"maximumRecordAgeInSeconds,omitempty"` + MaximumRetryAttempts *int64 `json:"maximumRetryAttempts,omitempty"` + OnPartialBatchItemFailure *string `json:"onPartialBatchItemFailure,omitempty"` + ParallelizationFactor *int64 `json:"parallelizationFactor,omitempty"` + StartingPosition *string `json:"startingPosition,omitempty"` +} + +// The parameters for using a Kinesis stream as a source. +type PipeSourceKinesisStreamParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // A DeadLetterConfig object that contains information about a dead-letter queue + // configuration. + DeadLetterConfig *DeadLetterConfig `json:"deadLetterConfig,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + MaximumRecordAgeInSeconds *int64 `json:"maximumRecordAgeInSeconds,omitempty"` + MaximumRetryAttempts *int64 `json:"maximumRetryAttempts,omitempty"` + OnPartialBatchItemFailure *string `json:"onPartialBatchItemFailure,omitempty"` + ParallelizationFactor *int64 `json:"parallelizationFactor,omitempty"` + StartingPosition *string `json:"startingPosition,omitempty"` + StartingPositionTimestamp *metav1.Time `json:"startingPositionTimestamp,omitempty"` +} + +// The parameters for using an MSK stream as a source. +type PipeSourceManagedStreamingKafkaParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + ConsumerGroupID *string `json:"consumerGroupID,omitempty"` + // The Secrets Manager secret that stores your stream credentials. 
+ Credentials *MSKAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + StartingPosition *string `json:"startingPosition,omitempty"` + TopicName *string `json:"topicName,omitempty"` +} + +// The parameters required to set up a source for your pipe. +type PipeSourceParameters struct { + // The parameters for using an Active MQ broker as a source. + ActiveMQBrokerParameters *PipeSourceActiveMQBrokerParameters `json:"activeMQBrokerParameters,omitempty"` + // The parameters for using a DynamoDB stream as a source. + DynamoDBStreamParameters *PipeSourceDynamoDBStreamParameters `json:"dynamoDBStreamParameters,omitempty"` + // The collection of event patterns used to filter events. For more information, + // see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. + FilterCriteria *FilterCriteria `json:"filterCriteria,omitempty"` + // The parameters for using a Kinesis stream as a source. + KinesisStreamParameters *PipeSourceKinesisStreamParameters `json:"kinesisStreamParameters,omitempty"` + // The parameters for using an MSK stream as a source. + ManagedStreamingKafkaParameters *PipeSourceManagedStreamingKafkaParameters `json:"managedStreamingKafkaParameters,omitempty"` + // The parameters for using a Rabbit MQ broker as a source. + RabbitMQBrokerParameters *PipeSourceRabbitMQBrokerParameters `json:"rabbitMQBrokerParameters,omitempty"` + // The parameters for using a self-managed Apache Kafka stream as a source. + SelfManagedKafkaParameters *PipeSourceSelfManagedKafkaParameters `json:"selfManagedKafkaParameters,omitempty"` + // The parameters for using a Amazon SQS stream as a source. + SQSQueueParameters *PipeSourceSQSQueueParameters `json:"sqsQueueParameters,omitempty"` +} + +// The parameters for using a Rabbit MQ broker as a source. 
+type PipeSourceRabbitMQBrokerParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your broker credentials. + Credentials *MQBrokerAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + QueueName *string `json:"queueName,omitempty"` + VirtualHost *string `json:"virtualHost,omitempty"` +} + +// The parameters for using a Amazon SQS stream as a source. +type PipeSourceSQSQueueParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` +} + +// The parameters for using a self-managed Apache Kafka stream as a source. +type PipeSourceSelfManagedKafkaParameters struct { + AdditionalBootstrapServers []*string `json:"additionalBootstrapServers,omitempty"` + BatchSize *int64 `json:"batchSize,omitempty"` + ConsumerGroupID *string `json:"consumerGroupID,omitempty"` + // The Secrets Manager secret that stores your stream credentials. + Credentials *SelfManagedKafkaAccessConfigurationCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + // // Optional SecretManager ARN which stores the database credentials + ServerRootCaCertificate *string `json:"serverRootCaCertificate,omitempty"` + StartingPosition *string `json:"startingPosition,omitempty"` + TopicName *string `json:"topicName,omitempty"` + // This structure specifies the VPC subnets and security groups for the stream, + // and whether a public IP address is to be used. + VPC *SelfManagedKafkaAccessConfigurationVPC `json:"vpc,omitempty"` +} + +// The parameters for using an Batch job as a target. +type PipeTargetBatchJobParameters struct { + // The array properties for the submitted job, such as the size of the array. + // The array size can be between 2 and 10,000. 
If you specify array properties + // for a job, it becomes an array job. This parameter is used only if the target + // is an Batch job. + ArrayProperties *BatchArrayProperties `json:"arrayProperties,omitempty"` + // The overrides that are sent to a container. + ContainerOverrides *BatchContainerOverrides `json:"containerOverrides,omitempty"` + DependsOn []*BatchJobDependency `json:"dependsOn,omitempty"` + JobDefinition *string `json:"jobDefinition,omitempty"` + JobName *string `json:"jobName,omitempty"` + Parameters map[string]*string `json:"parameters,omitempty"` + // The retry strategy that's associated with a job. For more information, see + // Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) + // in the Batch User Guide. + RetryStrategy *BatchRetryStrategy `json:"retryStrategy,omitempty"` +} + +// The parameters for using an CloudWatch Logs log stream as a target. +type PipeTargetCloudWatchLogsParameters struct { + LogStreamName *string `json:"logStreamName,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` +} + +// The parameters for using an Amazon ECS task as a target. +type PipeTargetECSTaskParameters struct { + CapacityProviderStrategy []*CapacityProviderStrategyItem `json:"capacityProviderStrategy,omitempty"` + EnableECSManagedTags *bool `json:"enableECSManagedTags,omitempty"` + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty"` + Group *string `json:"group,omitempty"` + LaunchType *string `json:"launchType,omitempty"` + // This structure specifies the network configuration for an Amazon ECS task. + NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"` + // The overrides that are associated with a task. 
+ Overrides *ECSTaskOverride `json:"overrides,omitempty"` + PlacementConstraints []*PlacementConstraint `json:"placementConstraints,omitempty"` + PlacementStrategy []*PlacementStrategy `json:"placementStrategy,omitempty"` + PlatformVersion *string `json:"platformVersion,omitempty"` + PropagateTags *string `json:"propagateTags,omitempty"` + ReferenceID *string `json:"referenceID,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + TaskCount *int64 `json:"taskCount,omitempty"` + TaskDefinitionARN *string `json:"taskDefinitionARN,omitempty"` +} + +// The parameters for using an EventBridge event bus as a target. +type PipeTargetEventBridgeEventBusParameters struct { + DetailType *string `json:"detailType,omitempty"` + EndpointID *string `json:"endpointID,omitempty"` + Resources []*string `json:"resources,omitempty"` + Source *string `json:"source,omitempty"` + Time *string `json:"time,omitempty"` +} + +// These are custom parameter to be used when the target is an API Gateway REST +// APIs or EventBridge ApiDestinations. +type PipeTargetHTTPParameters struct { + HeaderParameters map[string]*string `json:"headerParameters,omitempty"` + PathParameterValues []*string `json:"pathParameterValues,omitempty"` + QueryStringParameters map[string]*string `json:"queryStringParameters,omitempty"` +} + +// The parameters for using a Kinesis stream as a source. +type PipeTargetKinesisStreamParameters struct { + PartitionKey *string `json:"partitionKey,omitempty"` +} + +// The parameters for using a Lambda function as a target. +type PipeTargetLambdaFunctionParameters struct { + InvocationType *string `json:"invocationType,omitempty"` +} + +// The parameters required to set up a target for your pipe. +type PipeTargetParameters struct { + // The parameters for using an Batch job as a target. + BatchJobParameters *PipeTargetBatchJobParameters `json:"batchJobParameters,omitempty"` + // The parameters for using an CloudWatch Logs log stream as a target. 
+ CloudWatchLogsParameters *PipeTargetCloudWatchLogsParameters `json:"cloudWatchLogsParameters,omitempty"` + // The parameters for using an Amazon ECS task as a target. + ECSTaskParameters *PipeTargetECSTaskParameters `json:"ecsTaskParameters,omitempty"` + // The parameters for using an EventBridge event bus as a target. + EventBridgeEventBusParameters *PipeTargetEventBridgeEventBusParameters `json:"eventBridgeEventBusParameters,omitempty"` + // These are custom parameter to be used when the target is an API Gateway REST + // APIs or EventBridge ApiDestinations. + HTTPParameters *PipeTargetHTTPParameters `json:"httpParameters,omitempty"` + InputTemplate *string `json:"inputTemplate,omitempty"` + // The parameters for using a Kinesis stream as a source. + KinesisStreamParameters *PipeTargetKinesisStreamParameters `json:"kinesisStreamParameters,omitempty"` + // The parameters for using a Lambda function as a target. + LambdaFunctionParameters *PipeTargetLambdaFunctionParameters `json:"lambdaFunctionParameters,omitempty"` + // These are custom parameters to be used when the target is a Amazon Redshift + // cluster to invoke the Amazon Redshift Data API ExecuteStatement. + RedshiftDataParameters *PipeTargetRedshiftDataParameters `json:"redshiftDataParameters,omitempty"` + // The parameters for using a SageMaker pipeline as a target. + SageMakerPipelineParameters *PipeTargetSageMakerPipelineParameters `json:"sageMakerPipelineParameters,omitempty"` + // The parameters for using a Amazon SQS stream as a source. + SQSQueueParameters *PipeTargetSQSQueueParameters `json:"sqsQueueParameters,omitempty"` + // The parameters for using a Step Functions state machine as a target. + StepFunctionStateMachineParameters *PipeTargetStateMachineParameters `json:"stepFunctionStateMachineParameters,omitempty"` +} + +// These are custom parameters to be used when the target is a Amazon Redshift +// cluster to invoke the Amazon Redshift Data API ExecuteStatement. 
+type PipeTargetRedshiftDataParameters struct { + // // Redshift Database + Database *string `json:"database,omitempty"` + // // Database user name + DBUser *string `json:"dbUser,omitempty"` + // // For targets, can either specify an ARN or a jsonpath pointing to the ARN. + SecretManagerARN *string `json:"secretManagerARN,omitempty"` + // // A list of SQLs. + SQLs []*string `json:"sqls,omitempty"` + // // A name for Redshift DataAPI statement which can be used as filter of // + // ListStatement. + StatementName *string `json:"statementName,omitempty"` + WithEvent *bool `json:"withEvent,omitempty"` +} + +// The parameters for using a Amazon SQS stream as a source. +type PipeTargetSQSQueueParameters struct { + MessageDeduplicationID *string `json:"messageDeduplicationID,omitempty"` + MessageGroupID *string `json:"messageGroupID,omitempty"` +} + +// The parameters for using a SageMaker pipeline as a target. +type PipeTargetSageMakerPipelineParameters struct { + PipelineParameterList []*SageMakerPipelineParameter `json:"pipelineParameterList,omitempty"` +} + +// The parameters for using a Step Functions state machine as a target. +type PipeTargetStateMachineParameters struct { + InvocationType *string `json:"invocationType,omitempty"` +} + +// An object that represents a pipe. Amazon EventBridgePipes connect event sources +// to targets and reduces the need for specialized knowledge and integration +// code. 
+type Pipe_SDK struct { + ARN *string `json:"arn,omitempty"` + CreationTime *metav1.Time `json:"creationTime,omitempty"` + CurrentState *string `json:"currentState,omitempty"` + DesiredState *string `json:"desiredState,omitempty"` + Enrichment *string `json:"enrichment,omitempty"` + LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` + Name *string `json:"name,omitempty"` + Source *string `json:"source,omitempty"` + StateReason *string `json:"stateReason,omitempty"` + Target *string `json:"target,omitempty"` +} + +// An object representing a constraint on task placement. To learn more, see +// Task Placement Constraints (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) +// in the Amazon Elastic Container Service Developer Guide. +type PlacementConstraint struct { + Expression *string `json:"expression,omitempty"` + Type *string `json:"type_,omitempty"` +} + +// The task placement strategy for a task or service. To learn more, see Task +// Placement Strategies (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) +// in the Amazon Elastic Container Service Developer Guide. +type PlacementStrategy struct { + Field *string `json:"field,omitempty"` + Type *string `json:"type_,omitempty"` +} + +// Name/Value pair of a parameter to start execution of a SageMaker Model Building +// Pipeline. +type SageMakerPipelineParameter struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The Secrets Manager secret that stores your stream credentials. 
+type SelfManagedKafkaAccessConfigurationCredentials struct { + // Optional SecretManager ARN which stores the database credentials + BasicAuth *string `json:"basicAuth,omitempty"` + // Optional SecretManager ARN which stores the database credentials + ClientCertificateTLSAuth *string `json:"clientCertificateTLSAuth,omitempty"` + // Optional SecretManager ARN which stores the database credentials + SASLSCRAM256Auth *string `json:"saslSCRAM256Auth,omitempty"` + // Optional SecretManager ARN which stores the database credentials + SASLSCRAM512Auth *string `json:"saslSCRAM512Auth,omitempty"` +} + +// This structure specifies the VPC subnets and security groups for the stream, +// and whether a public IP address is to be used. +type SelfManagedKafkaAccessConfigurationVPC struct { + // List of SecurityGroupId. + SecurityGroup []*string `json:"securityGroup,omitempty"` + // List of SubnetId. + Subnets []*string `json:"subnets,omitempty"` +} + +// A key-value pair associated with an Amazon Web Services resource. In EventBridge, +// rules and event buses support tagging. +type Tag struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// The parameters for using an Active MQ broker as a source. +type UpdatePipeSourceActiveMQBrokerParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your broker credentials. + Credentials *MQBrokerAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` +} + +// The parameters for using a DynamoDB stream as a source. +type UpdatePipeSourceDynamoDBStreamParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // A DeadLetterConfig object that contains information about a dead-letter queue + // configuration. 
+ DeadLetterConfig *DeadLetterConfig `json:"deadLetterConfig,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + MaximumRecordAgeInSeconds *int64 `json:"maximumRecordAgeInSeconds,omitempty"` + MaximumRetryAttempts *int64 `json:"maximumRetryAttempts,omitempty"` + OnPartialBatchItemFailure *string `json:"onPartialBatchItemFailure,omitempty"` + ParallelizationFactor *int64 `json:"parallelizationFactor,omitempty"` +} + +// The parameters for using a Kinesis stream as a source. +type UpdatePipeSourceKinesisStreamParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // A DeadLetterConfig object that contains information about a dead-letter queue + // configuration. + DeadLetterConfig *DeadLetterConfig `json:"deadLetterConfig,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + MaximumRecordAgeInSeconds *int64 `json:"maximumRecordAgeInSeconds,omitempty"` + MaximumRetryAttempts *int64 `json:"maximumRetryAttempts,omitempty"` + OnPartialBatchItemFailure *string `json:"onPartialBatchItemFailure,omitempty"` + ParallelizationFactor *int64 `json:"parallelizationFactor,omitempty"` +} + +// The parameters for using an MSK stream as a source. +type UpdatePipeSourceManagedStreamingKafkaParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your stream credentials. + Credentials *MSKAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` +} + +// The parameters required to set up a source for your pipe. +type UpdatePipeSourceParameters struct { + // The parameters for using an Active MQ broker as a source. + ActiveMQBrokerParameters *UpdatePipeSourceActiveMQBrokerParameters `json:"activeMQBrokerParameters,omitempty"` + // The parameters for using a DynamoDB stream as a source. 
+ DynamoDBStreamParameters *UpdatePipeSourceDynamoDBStreamParameters `json:"dynamoDBStreamParameters,omitempty"` + // The collection of event patterns used to filter events. For more information, + // see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + // in the Amazon EventBridge User Guide. + FilterCriteria *FilterCriteria `json:"filterCriteria,omitempty"` + // The parameters for using a Kinesis stream as a source. + KinesisStreamParameters *UpdatePipeSourceKinesisStreamParameters `json:"kinesisStreamParameters,omitempty"` + // The parameters for using an MSK stream as a source. + ManagedStreamingKafkaParameters *UpdatePipeSourceManagedStreamingKafkaParameters `json:"managedStreamingKafkaParameters,omitempty"` + // The parameters for using a Rabbit MQ broker as a source. + RabbitMQBrokerParameters *UpdatePipeSourceRabbitMQBrokerParameters `json:"rabbitMQBrokerParameters,omitempty"` + // The parameters for using a self-managed Apache Kafka stream as a source. + SelfManagedKafkaParameters *UpdatePipeSourceSelfManagedKafkaParameters `json:"selfManagedKafkaParameters,omitempty"` + // The parameters for using a Amazon SQS stream as a source. + SQSQueueParameters *UpdatePipeSourceSQSQueueParameters `json:"sqsQueueParameters,omitempty"` +} + +// The parameters for using a Rabbit MQ broker as a source. +type UpdatePipeSourceRabbitMQBrokerParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your broker credentials. + Credentials *MQBrokerAccessCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` +} + +// The parameters for using a Amazon SQS stream as a source. 
+type UpdatePipeSourceSQSQueueParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` +} + +// The parameters for using a self-managed Apache Kafka stream as a source. +type UpdatePipeSourceSelfManagedKafkaParameters struct { + BatchSize *int64 `json:"batchSize,omitempty"` + // The Secrets Manager secret that stores your stream credentials. + Credentials *SelfManagedKafkaAccessConfigurationCredentials `json:"credentials,omitempty"` + MaximumBatchingWindowInSeconds *int64 `json:"maximumBatchingWindowInSeconds,omitempty"` + // // Optional SecretManager ARN which stores the database credentials + ServerRootCaCertificate *string `json:"serverRootCaCertificate,omitempty"` + // This structure specifies the VPC subnets and security groups for the stream, + // and whether a public IP address is to be used. + VPC *SelfManagedKafkaAccessConfigurationVPC `json:"vpc,omitempty"` +} + +// Indicates that an error has occurred while performing a validate operation. +type ValidationExceptionField struct { + Name *string `json:"name,omitempty"` +} diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..eae70b4 --- /dev/null +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2416 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSVPCConfiguration) DeepCopyInto(out *AWSVPCConfiguration) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSVPCConfiguration. +func (in *AWSVPCConfiguration) DeepCopy() *AWSVPCConfiguration { + if in == nil { + return nil + } + out := new(AWSVPCConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchArrayProperties) DeepCopyInto(out *BatchArrayProperties) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchArrayProperties. 
+func (in *BatchArrayProperties) DeepCopy() *BatchArrayProperties { + if in == nil { + return nil + } + out := new(BatchArrayProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchContainerOverrides) DeepCopyInto(out *BatchContainerOverrides) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]*BatchEnvironmentVariable, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(BatchEnvironmentVariable) + (*in).DeepCopyInto(*out) + } + } + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ResourceRequirements != nil { + in, out := &in.ResourceRequirements, &out.ResourceRequirements + *out = make([]*BatchResourceRequirement, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(BatchResourceRequirement) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchContainerOverrides. +func (in *BatchContainerOverrides) DeepCopy() *BatchContainerOverrides { + if in == nil { + return nil + } + out := new(BatchContainerOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BatchEnvironmentVariable) DeepCopyInto(out *BatchEnvironmentVariable) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchEnvironmentVariable. +func (in *BatchEnvironmentVariable) DeepCopy() *BatchEnvironmentVariable { + if in == nil { + return nil + } + out := new(BatchEnvironmentVariable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchJobDependency) DeepCopyInto(out *BatchJobDependency) { + *out = *in + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchJobDependency. +func (in *BatchJobDependency) DeepCopy() *BatchJobDependency { + if in == nil { + return nil + } + out := new(BatchJobDependency) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchResourceRequirement) DeepCopyInto(out *BatchResourceRequirement) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchResourceRequirement. 
+func (in *BatchResourceRequirement) DeepCopy() *BatchResourceRequirement { + if in == nil { + return nil + } + out := new(BatchResourceRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchRetryStrategy) DeepCopyInto(out *BatchRetryStrategy) { + *out = *in + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchRetryStrategy. +func (in *BatchRetryStrategy) DeepCopy() *BatchRetryStrategy { + if in == nil { + return nil + } + out := new(BatchRetryStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyItem) DeepCopyInto(out *CapacityProviderStrategyItem) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(int64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyItem. +func (in *CapacityProviderStrategyItem) DeepCopy() *CapacityProviderStrategyItem { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeadLetterConfig) DeepCopyInto(out *DeadLetterConfig) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfig. +func (in *DeadLetterConfig) DeepCopy() *DeadLetterConfig { + if in == nil { + return nil + } + out := new(DeadLetterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSContainerOverride) DeepCopyInto(out *ECSContainerOverride) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(int64) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]*ECSEnvironmentVariable, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ECSEnvironmentVariable) + (*in).DeepCopyInto(*out) + } + } + } + if in.EnvironmentFiles != nil { + in, out := &in.EnvironmentFiles, &out.EnvironmentFiles + *out = make([]*ECSEnvironmentFile, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ECSEnvironmentFile) + (*in).DeepCopyInto(*out) + } + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(int64) + **out = **in + } + if in.MemoryReservation != nil { + in, out := &in.MemoryReservation, &out.MemoryReservation + *out = new(int64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceRequirements != nil { + in, out := &in.ResourceRequirements, &out.ResourceRequirements + *out = 
make([]*ECSResourceRequirement, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ECSResourceRequirement) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSContainerOverride. +func (in *ECSContainerOverride) DeepCopy() *ECSContainerOverride { + if in == nil { + return nil + } + out := new(ECSContainerOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSEnvironmentFile) DeepCopyInto(out *ECSEnvironmentFile) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSEnvironmentFile. +func (in *ECSEnvironmentFile) DeepCopy() *ECSEnvironmentFile { + if in == nil { + return nil + } + out := new(ECSEnvironmentFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSEnvironmentVariable) DeepCopyInto(out *ECSEnvironmentVariable) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSEnvironmentVariable. +func (in *ECSEnvironmentVariable) DeepCopy() *ECSEnvironmentVariable { + if in == nil { + return nil + } + out := new(ECSEnvironmentVariable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ECSEphemeralStorage) DeepCopyInto(out *ECSEphemeralStorage) { + *out = *in + if in.SizeInGiB != nil { + in, out := &in.SizeInGiB, &out.SizeInGiB + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSEphemeralStorage. +func (in *ECSEphemeralStorage) DeepCopy() *ECSEphemeralStorage { + if in == nil { + return nil + } + out := new(ECSEphemeralStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSInferenceAcceleratorOverride) DeepCopyInto(out *ECSInferenceAcceleratorOverride) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DeviceType != nil { + in, out := &in.DeviceType, &out.DeviceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSInferenceAcceleratorOverride. +func (in *ECSInferenceAcceleratorOverride) DeepCopy() *ECSInferenceAcceleratorOverride { + if in == nil { + return nil + } + out := new(ECSInferenceAcceleratorOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSResourceRequirement) DeepCopyInto(out *ECSResourceRequirement) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSResourceRequirement. 
+func (in *ECSResourceRequirement) DeepCopy() *ECSResourceRequirement { + if in == nil { + return nil + } + out := new(ECSResourceRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSTaskOverride) DeepCopyInto(out *ECSTaskOverride) { + *out = *in + if in.ContainerOverrides != nil { + in, out := &in.ContainerOverrides, &out.ContainerOverrides + *out = make([]*ECSContainerOverride, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ECSContainerOverride) + (*in).DeepCopyInto(*out) + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(ECSEphemeralStorage) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleARN != nil { + in, out := &in.ExecutionRoleARN, &out.ExecutionRoleARN + *out = new(string) + **out = **in + } + if in.InferenceAcceleratorOverrides != nil { + in, out := &in.InferenceAcceleratorOverrides, &out.InferenceAcceleratorOverrides + *out = make([]*ECSInferenceAcceleratorOverride, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ECSInferenceAcceleratorOverride) + (*in).DeepCopyInto(*out) + } + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.TaskRoleARN != nil { + in, out := &in.TaskRoleARN, &out.TaskRoleARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSTaskOverride. 
+func (in *ECSTaskOverride) DeepCopy() *ECSTaskOverride { + if in == nil { + return nil + } + out := new(ECSTaskOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterCriteria) DeepCopyInto(out *FilterCriteria) { + *out = *in + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]*Filter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Filter) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterCriteria. +func (in *FilterCriteria) DeepCopy() *FilterCriteria { + if in == nil { + return nil + } + out := new(FilterCriteria) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MQBrokerAccessCredentials) DeepCopyInto(out *MQBrokerAccessCredentials) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MQBrokerAccessCredentials. 
+func (in *MQBrokerAccessCredentials) DeepCopy() *MQBrokerAccessCredentials { + if in == nil { + return nil + } + out := new(MQBrokerAccessCredentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSKAccessCredentials) DeepCopyInto(out *MSKAccessCredentials) { + *out = *in + if in.ClientCertificateTLSAuth != nil { + in, out := &in.ClientCertificateTLSAuth, &out.ClientCertificateTLSAuth + *out = new(string) + **out = **in + } + if in.SASLSCRAM512Auth != nil { + in, out := &in.SASLSCRAM512Auth, &out.SASLSCRAM512Auth + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSKAccessCredentials. +func (in *MSKAccessCredentials) DeepCopy() *MSKAccessCredentials { + if in == nil { + return nil + } + out := new(MSKAccessCredentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfiguration) DeepCopyInto(out *NetworkConfiguration) { + *out = *in + if in.AWSVPCConfiguration != nil { + in, out := &in.AWSVPCConfiguration, &out.AWSVPCConfiguration + *out = new(AWSVPCConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfiguration. +func (in *NetworkConfiguration) DeepCopy() *NetworkConfiguration { + if in == nil { + return nil + } + out := new(NetworkConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Pipe) DeepCopyInto(out *Pipe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipe. +func (in *Pipe) DeepCopy() *Pipe { + if in == nil { + return nil + } + out := new(Pipe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pipe) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeEnrichmentHTTPParameters) DeepCopyInto(out *PipeEnrichmentHTTPParameters) { + *out = *in + if in.HeaderParameters != nil { + in, out := &in.HeaderParameters, &out.HeaderParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PathParameterValues != nil { + in, out := &in.PathParameterValues, &out.PathParameterValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeEnrichmentHTTPParameters. 
+func (in *PipeEnrichmentHTTPParameters) DeepCopy() *PipeEnrichmentHTTPParameters { + if in == nil { + return nil + } + out := new(PipeEnrichmentHTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeEnrichmentParameters) DeepCopyInto(out *PipeEnrichmentParameters) { + *out = *in + if in.HTTPParameters != nil { + in, out := &in.HTTPParameters, &out.HTTPParameters + *out = new(PipeEnrichmentHTTPParameters) + (*in).DeepCopyInto(*out) + } + if in.InputTemplate != nil { + in, out := &in.InputTemplate, &out.InputTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeEnrichmentParameters. +func (in *PipeEnrichmentParameters) DeepCopy() *PipeEnrichmentParameters { + if in == nil { + return nil + } + out := new(PipeEnrichmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeList) DeepCopyInto(out *PipeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pipe, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeList. +func (in *PipeList) DeepCopy() *PipeList { + if in == nil { + return nil + } + out := new(PipeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PipeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PipeSourceActiveMQBrokerParameters) DeepCopyInto(out *PipeSourceActiveMQBrokerParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MQBrokerAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceActiveMQBrokerParameters. +func (in *PipeSourceActiveMQBrokerParameters) DeepCopy() *PipeSourceActiveMQBrokerParameters { + if in == nil { + return nil + } + out := new(PipeSourceActiveMQBrokerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeSourceDynamoDBStreamParameters) DeepCopyInto(out *PipeSourceDynamoDBStreamParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfig) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(int64) + **out = **in + } + if in.OnPartialBatchItemFailure != nil { + in, out := &in.OnPartialBatchItemFailure, &out.OnPartialBatchItemFailure + *out = new(string) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(int64) + **out = **in + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceDynamoDBStreamParameters. +func (in *PipeSourceDynamoDBStreamParameters) DeepCopy() *PipeSourceDynamoDBStreamParameters { + if in == nil { + return nil + } + out := new(PipeSourceDynamoDBStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeSourceKinesisStreamParameters) DeepCopyInto(out *PipeSourceKinesisStreamParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfig) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(int64) + **out = **in + } + if in.OnPartialBatchItemFailure != nil { + in, out := &in.OnPartialBatchItemFailure, &out.OnPartialBatchItemFailure + *out = new(string) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(int64) + **out = **in + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.StartingPositionTimestamp != nil { + in, out := &in.StartingPositionTimestamp, &out.StartingPositionTimestamp + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceKinesisStreamParameters. +func (in *PipeSourceKinesisStreamParameters) DeepCopy() *PipeSourceKinesisStreamParameters { + if in == nil { + return nil + } + out := new(PipeSourceKinesisStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeSourceManagedStreamingKafkaParameters) DeepCopyInto(out *PipeSourceManagedStreamingKafkaParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MSKAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceManagedStreamingKafkaParameters. +func (in *PipeSourceManagedStreamingKafkaParameters) DeepCopy() *PipeSourceManagedStreamingKafkaParameters { + if in == nil { + return nil + } + out := new(PipeSourceManagedStreamingKafkaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeSourceParameters) DeepCopyInto(out *PipeSourceParameters) { + *out = *in + if in.ActiveMQBrokerParameters != nil { + in, out := &in.ActiveMQBrokerParameters, &out.ActiveMQBrokerParameters + *out = new(PipeSourceActiveMQBrokerParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamoDBStreamParameters != nil { + in, out := &in.DynamoDBStreamParameters, &out.DynamoDBStreamParameters + *out = new(PipeSourceDynamoDBStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterCriteria != nil { + in, out := &in.FilterCriteria, &out.FilterCriteria + *out = new(FilterCriteria) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamParameters != nil { + in, out := &in.KinesisStreamParameters, &out.KinesisStreamParameters + *out = new(PipeSourceKinesisStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedStreamingKafkaParameters != nil { + in, out := &in.ManagedStreamingKafkaParameters, &out.ManagedStreamingKafkaParameters + *out = new(PipeSourceManagedStreamingKafkaParameters) + (*in).DeepCopyInto(*out) + } + if in.RabbitMQBrokerParameters != nil { + in, out := &in.RabbitMQBrokerParameters, &out.RabbitMQBrokerParameters + *out = new(PipeSourceRabbitMQBrokerParameters) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedKafkaParameters != nil { + in, out := &in.SelfManagedKafkaParameters, &out.SelfManagedKafkaParameters + *out = new(PipeSourceSelfManagedKafkaParameters) + (*in).DeepCopyInto(*out) + } + if in.SQSQueueParameters != nil { + in, out := &in.SQSQueueParameters, &out.SQSQueueParameters + *out = new(PipeSourceSQSQueueParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceParameters. +func (in *PipeSourceParameters) DeepCopy() *PipeSourceParameters { + if in == nil { + return nil + } + out := new(PipeSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PipeSourceRabbitMQBrokerParameters) DeepCopyInto(out *PipeSourceRabbitMQBrokerParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MQBrokerAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.VirtualHost != nil { + in, out := &in.VirtualHost, &out.VirtualHost + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceRabbitMQBrokerParameters. +func (in *PipeSourceRabbitMQBrokerParameters) DeepCopy() *PipeSourceRabbitMQBrokerParameters { + if in == nil { + return nil + } + out := new(PipeSourceRabbitMQBrokerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeSourceSQSQueueParameters) DeepCopyInto(out *PipeSourceSQSQueueParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceSQSQueueParameters. 
+func (in *PipeSourceSQSQueueParameters) DeepCopy() *PipeSourceSQSQueueParameters { + if in == nil { + return nil + } + out := new(PipeSourceSQSQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeSourceSelfManagedKafkaParameters) DeepCopyInto(out *PipeSourceSelfManagedKafkaParameters) { + *out = *in + if in.AdditionalBootstrapServers != nil { + in, out := &in.AdditionalBootstrapServers, &out.AdditionalBootstrapServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(SelfManagedKafkaAccessConfigurationCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.ServerRootCaCertificate != nil { + in, out := &in.ServerRootCaCertificate, &out.ServerRootCaCertificate + *out = new(string) + **out = **in + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(SelfManagedKafkaAccessConfigurationVPC) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeSourceSelfManagedKafkaParameters. 
+func (in *PipeSourceSelfManagedKafkaParameters) DeepCopy() *PipeSourceSelfManagedKafkaParameters { + if in == nil { + return nil + } + out := new(PipeSourceSelfManagedKafkaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeSpec) DeepCopyInto(out *PipeSpec) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.Enrichment != nil { + in, out := &in.Enrichment, &out.Enrichment + *out = new(string) + **out = **in + } + if in.EnrichmentParameters != nil { + in, out := &in.EnrichmentParameters, &out.EnrichmentParameters + *out = new(PipeEnrichmentParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourceParameters != nil { + in, out := &in.SourceParameters, &out.SourceParameters + *out = new(PipeSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TargetParameters != nil { + in, out := &in.TargetParameters, &out.TargetParameters + *out = new(PipeTargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new PipeSpec. +func (in *PipeSpec) DeepCopy() *PipeSpec { + if in == nil { + return nil + } + out := new(PipeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeStatus) DeepCopyInto(out *PipeStatus) { + *out = *in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.CurrentState != nil { + in, out := &in.CurrentState, &out.CurrentState + *out = new(string) + **out = **in + } + if in.LastModifiedTime != nil { + in, out := &in.LastModifiedTime, &out.LastModifiedTime + *out = (*in).DeepCopy() + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeStatus. +func (in *PipeStatus) DeepCopy() *PipeStatus { + if in == nil { + return nil + } + out := new(PipeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeTargetBatchJobParameters) DeepCopyInto(out *PipeTargetBatchJobParameters) { + *out = *in + if in.ArrayProperties != nil { + in, out := &in.ArrayProperties, &out.ArrayProperties + *out = new(BatchArrayProperties) + (*in).DeepCopyInto(*out) + } + if in.ContainerOverrides != nil { + in, out := &in.ContainerOverrides, &out.ContainerOverrides + *out = new(BatchContainerOverrides) + (*in).DeepCopyInto(*out) + } + if in.DependsOn != nil { + in, out := &in.DependsOn, &out.DependsOn + *out = make([]*BatchJobDependency, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(BatchJobDependency) + (*in).DeepCopyInto(*out) + } + } + } + if in.JobDefinition != nil { + in, out := &in.JobDefinition, &out.JobDefinition + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(BatchRetryStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetBatchJobParameters. +func (in *PipeTargetBatchJobParameters) DeepCopy() *PipeTargetBatchJobParameters { + if in == nil { + return nil + } + out := new(PipeTargetBatchJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeTargetCloudWatchLogsParameters) DeepCopyInto(out *PipeTargetCloudWatchLogsParameters) { + *out = *in + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetCloudWatchLogsParameters. +func (in *PipeTargetCloudWatchLogsParameters) DeepCopy() *PipeTargetCloudWatchLogsParameters { + if in == nil { + return nil + } + out := new(PipeTargetCloudWatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetECSTaskParameters) DeepCopyInto(out *PipeTargetECSTaskParameters) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]*CapacityProviderStrategyItem, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CapacityProviderStrategyItem) + (*in).DeepCopyInto(*out) + } + } + } + if in.EnableECSManagedTags != nil { + in, out := &in.EnableECSManagedTags, &out.EnableECSManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = 
new(ECSTaskOverride) + (*in).DeepCopyInto(*out) + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]*PlacementConstraint, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PlacementConstraint) + (*in).DeepCopyInto(*out) + } + } + } + if in.PlacementStrategy != nil { + in, out := &in.PlacementStrategy, &out.PlacementStrategy + *out = make([]*PlacementStrategy, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PlacementStrategy) + (*in).DeepCopyInto(*out) + } + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.ReferenceID != nil { + in, out := &in.ReferenceID, &out.ReferenceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(int64) + **out = **in + } + if in.TaskDefinitionARN != nil { + in, out := &in.TaskDefinitionARN, &out.TaskDefinitionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetECSTaskParameters. +func (in *PipeTargetECSTaskParameters) DeepCopy() *PipeTargetECSTaskParameters { + if in == nil { + return nil + } + out := new(PipeTargetECSTaskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeTargetEventBridgeEventBusParameters) DeepCopyInto(out *PipeTargetEventBridgeEventBusParameters) { + *out = *in + if in.DetailType != nil { + in, out := &in.DetailType, &out.DetailType + *out = new(string) + **out = **in + } + if in.EndpointID != nil { + in, out := &in.EndpointID, &out.EndpointID + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetEventBridgeEventBusParameters. +func (in *PipeTargetEventBridgeEventBusParameters) DeepCopy() *PipeTargetEventBridgeEventBusParameters { + if in == nil { + return nil + } + out := new(PipeTargetEventBridgeEventBusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeTargetHTTPParameters) DeepCopyInto(out *PipeTargetHTTPParameters) { + *out = *in + if in.HeaderParameters != nil { + in, out := &in.HeaderParameters, &out.HeaderParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PathParameterValues != nil { + in, out := &in.PathParameterValues, &out.PathParameterValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetHTTPParameters. +func (in *PipeTargetHTTPParameters) DeepCopy() *PipeTargetHTTPParameters { + if in == nil { + return nil + } + out := new(PipeTargetHTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetKinesisStreamParameters) DeepCopyInto(out *PipeTargetKinesisStreamParameters) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetKinesisStreamParameters. 
+func (in *PipeTargetKinesisStreamParameters) DeepCopy() *PipeTargetKinesisStreamParameters { + if in == nil { + return nil + } + out := new(PipeTargetKinesisStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetLambdaFunctionParameters) DeepCopyInto(out *PipeTargetLambdaFunctionParameters) { + *out = *in + if in.InvocationType != nil { + in, out := &in.InvocationType, &out.InvocationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetLambdaFunctionParameters. +func (in *PipeTargetLambdaFunctionParameters) DeepCopy() *PipeTargetLambdaFunctionParameters { + if in == nil { + return nil + } + out := new(PipeTargetLambdaFunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipeTargetParameters) DeepCopyInto(out *PipeTargetParameters) { + *out = *in + if in.BatchJobParameters != nil { + in, out := &in.BatchJobParameters, &out.BatchJobParameters + *out = new(PipeTargetBatchJobParameters) + (*in).DeepCopyInto(*out) + } + if in.CloudWatchLogsParameters != nil { + in, out := &in.CloudWatchLogsParameters, &out.CloudWatchLogsParameters + *out = new(PipeTargetCloudWatchLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ECSTaskParameters != nil { + in, out := &in.ECSTaskParameters, &out.ECSTaskParameters + *out = new(PipeTargetECSTaskParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBridgeEventBusParameters != nil { + in, out := &in.EventBridgeEventBusParameters, &out.EventBridgeEventBusParameters + *out = new(PipeTargetEventBridgeEventBusParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPParameters != nil { + in, out := &in.HTTPParameters, &out.HTTPParameters + *out = new(PipeTargetHTTPParameters) + (*in).DeepCopyInto(*out) + } + if in.InputTemplate != nil { + in, out := &in.InputTemplate, &out.InputTemplate + *out = new(string) + **out = **in + } + if in.KinesisStreamParameters != nil { + in, out := &in.KinesisStreamParameters, &out.KinesisStreamParameters + *out = new(PipeTargetKinesisStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaFunctionParameters != nil { + in, out := &in.LambdaFunctionParameters, &out.LambdaFunctionParameters + *out = new(PipeTargetLambdaFunctionParameters) + (*in).DeepCopyInto(*out) + } + if in.RedshiftDataParameters != nil { + in, out := &in.RedshiftDataParameters, &out.RedshiftDataParameters + *out = new(PipeTargetRedshiftDataParameters) + (*in).DeepCopyInto(*out) + } + if in.SageMakerPipelineParameters != nil { + in, out := &in.SageMakerPipelineParameters, &out.SageMakerPipelineParameters + *out = new(PipeTargetSageMakerPipelineParameters) + (*in).DeepCopyInto(*out) + } + if in.SQSQueueParameters != nil { + in, out := &in.SQSQueueParameters, &out.SQSQueueParameters + *out = 
new(PipeTargetSQSQueueParameters) + (*in).DeepCopyInto(*out) + } + if in.StepFunctionStateMachineParameters != nil { + in, out := &in.StepFunctionStateMachineParameters, &out.StepFunctionStateMachineParameters + *out = new(PipeTargetStateMachineParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetParameters. +func (in *PipeTargetParameters) DeepCopy() *PipeTargetParameters { + if in == nil { + return nil + } + out := new(PipeTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetRedshiftDataParameters) DeepCopyInto(out *PipeTargetRedshiftDataParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.DBUser != nil { + in, out := &in.DBUser, &out.DBUser + *out = new(string) + **out = **in + } + if in.SecretManagerARN != nil { + in, out := &in.SecretManagerARN, &out.SecretManagerARN + *out = new(string) + **out = **in + } + if in.SQLs != nil { + in, out := &in.SQLs, &out.SQLs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatementName != nil { + in, out := &in.StatementName, &out.StatementName + *out = new(string) + **out = **in + } + if in.WithEvent != nil { + in, out := &in.WithEvent, &out.WithEvent + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetRedshiftDataParameters. 
+func (in *PipeTargetRedshiftDataParameters) DeepCopy() *PipeTargetRedshiftDataParameters { + if in == nil { + return nil + } + out := new(PipeTargetRedshiftDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetSQSQueueParameters) DeepCopyInto(out *PipeTargetSQSQueueParameters) { + *out = *in + if in.MessageDeduplicationID != nil { + in, out := &in.MessageDeduplicationID, &out.MessageDeduplicationID + *out = new(string) + **out = **in + } + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetSQSQueueParameters. +func (in *PipeTargetSQSQueueParameters) DeepCopy() *PipeTargetSQSQueueParameters { + if in == nil { + return nil + } + out := new(PipeTargetSQSQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetSageMakerPipelineParameters) DeepCopyInto(out *PipeTargetSageMakerPipelineParameters) { + *out = *in + if in.PipelineParameterList != nil { + in, out := &in.PipelineParameterList, &out.PipelineParameterList + *out = make([]*SageMakerPipelineParameter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SageMakerPipelineParameter) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetSageMakerPipelineParameters. 
+func (in *PipeTargetSageMakerPipelineParameters) DeepCopy() *PipeTargetSageMakerPipelineParameters { + if in == nil { + return nil + } + out := new(PipeTargetSageMakerPipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipeTargetStateMachineParameters) DeepCopyInto(out *PipeTargetStateMachineParameters) { + *out = *in + if in.InvocationType != nil { + in, out := &in.InvocationType, &out.InvocationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipeTargetStateMachineParameters. +func (in *PipeTargetStateMachineParameters) DeepCopy() *PipeTargetStateMachineParameters { + if in == nil { + return nil + } + out := new(PipeTargetStateMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Pipe_SDK) DeepCopyInto(out *Pipe_SDK) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.CurrentState != nil { + in, out := &in.CurrentState, &out.CurrentState + *out = new(string) + **out = **in + } + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.Enrichment != nil { + in, out := &in.Enrichment, &out.Enrichment + *out = new(string) + **out = **in + } + if in.LastModifiedTime != nil { + in, out := &in.LastModifiedTime, &out.LastModifiedTime + *out = (*in).DeepCopy() + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipe_SDK. +func (in *Pipe_SDK) DeepCopy() *Pipe_SDK { + if in == nil { + return nil + } + out := new(Pipe_SDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraint) DeepCopyInto(out *PlacementConstraint) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraint. 
+func (in *PlacementConstraint) DeepCopy() *PlacementConstraint { + if in == nil { + return nil + } + out := new(PlacementConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementStrategy) DeepCopyInto(out *PlacementStrategy) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStrategy. +func (in *PlacementStrategy) DeepCopy() *PlacementStrategy { + if in == nil { + return nil + } + out := new(PlacementStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SageMakerPipelineParameter) DeepCopyInto(out *SageMakerPipelineParameter) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SageMakerPipelineParameter. +func (in *SageMakerPipelineParameter) DeepCopy() *SageMakerPipelineParameter { + if in == nil { + return nil + } + out := new(SageMakerPipelineParameter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedKafkaAccessConfigurationCredentials) DeepCopyInto(out *SelfManagedKafkaAccessConfigurationCredentials) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(string) + **out = **in + } + if in.ClientCertificateTLSAuth != nil { + in, out := &in.ClientCertificateTLSAuth, &out.ClientCertificateTLSAuth + *out = new(string) + **out = **in + } + if in.SASLSCRAM256Auth != nil { + in, out := &in.SASLSCRAM256Auth, &out.SASLSCRAM256Auth + *out = new(string) + **out = **in + } + if in.SASLSCRAM512Auth != nil { + in, out := &in.SASLSCRAM512Auth, &out.SASLSCRAM512Auth + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedKafkaAccessConfigurationCredentials. +func (in *SelfManagedKafkaAccessConfigurationCredentials) DeepCopy() *SelfManagedKafkaAccessConfigurationCredentials { + if in == nil { + return nil + } + out := new(SelfManagedKafkaAccessConfigurationCredentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedKafkaAccessConfigurationVPC) DeepCopyInto(out *SelfManagedKafkaAccessConfigurationVPC) { + *out = *in + if in.SecurityGroup != nil { + in, out := &in.SecurityGroup, &out.SecurityGroup + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedKafkaAccessConfigurationVPC. 
+func (in *SelfManagedKafkaAccessConfigurationVPC) DeepCopy() *SelfManagedKafkaAccessConfigurationVPC { + if in == nil { + return nil + } + out := new(SelfManagedKafkaAccessConfigurationVPC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tag) DeepCopyInto(out *Tag) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. +func (in *Tag) DeepCopy() *Tag { + if in == nil { + return nil + } + out := new(Tag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePipeSourceActiveMQBrokerParameters) DeepCopyInto(out *UpdatePipeSourceActiveMQBrokerParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MQBrokerAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceActiveMQBrokerParameters. +func (in *UpdatePipeSourceActiveMQBrokerParameters) DeepCopy() *UpdatePipeSourceActiveMQBrokerParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceActiveMQBrokerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdatePipeSourceDynamoDBStreamParameters) DeepCopyInto(out *UpdatePipeSourceDynamoDBStreamParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfig) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(int64) + **out = **in + } + if in.OnPartialBatchItemFailure != nil { + in, out := &in.OnPartialBatchItemFailure, &out.OnPartialBatchItemFailure + *out = new(string) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceDynamoDBStreamParameters. +func (in *UpdatePipeSourceDynamoDBStreamParameters) DeepCopy() *UpdatePipeSourceDynamoDBStreamParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceDynamoDBStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdatePipeSourceKinesisStreamParameters) DeepCopyInto(out *UpdatePipeSourceKinesisStreamParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfig) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(int64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(int64) + **out = **in + } + if in.OnPartialBatchItemFailure != nil { + in, out := &in.OnPartialBatchItemFailure, &out.OnPartialBatchItemFailure + *out = new(string) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceKinesisStreamParameters. +func (in *UpdatePipeSourceKinesisStreamParameters) DeepCopy() *UpdatePipeSourceKinesisStreamParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceKinesisStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdatePipeSourceManagedStreamingKafkaParameters) DeepCopyInto(out *UpdatePipeSourceManagedStreamingKafkaParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MSKAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceManagedStreamingKafkaParameters. +func (in *UpdatePipeSourceManagedStreamingKafkaParameters) DeepCopy() *UpdatePipeSourceManagedStreamingKafkaParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceManagedStreamingKafkaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdatePipeSourceParameters) DeepCopyInto(out *UpdatePipeSourceParameters) { + *out = *in + if in.ActiveMQBrokerParameters != nil { + in, out := &in.ActiveMQBrokerParameters, &out.ActiveMQBrokerParameters + *out = new(UpdatePipeSourceActiveMQBrokerParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamoDBStreamParameters != nil { + in, out := &in.DynamoDBStreamParameters, &out.DynamoDBStreamParameters + *out = new(UpdatePipeSourceDynamoDBStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterCriteria != nil { + in, out := &in.FilterCriteria, &out.FilterCriteria + *out = new(FilterCriteria) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamParameters != nil { + in, out := &in.KinesisStreamParameters, &out.KinesisStreamParameters + *out = new(UpdatePipeSourceKinesisStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedStreamingKafkaParameters != nil { + in, out := &in.ManagedStreamingKafkaParameters, &out.ManagedStreamingKafkaParameters + *out = new(UpdatePipeSourceManagedStreamingKafkaParameters) + (*in).DeepCopyInto(*out) + } + if in.RabbitMQBrokerParameters != nil { + in, out := &in.RabbitMQBrokerParameters, &out.RabbitMQBrokerParameters + *out = new(UpdatePipeSourceRabbitMQBrokerParameters) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedKafkaParameters != nil { + in, out := &in.SelfManagedKafkaParameters, &out.SelfManagedKafkaParameters + *out = new(UpdatePipeSourceSelfManagedKafkaParameters) + (*in).DeepCopyInto(*out) + } + if in.SQSQueueParameters != nil { + in, out := &in.SQSQueueParameters, &out.SQSQueueParameters + *out = new(UpdatePipeSourceSQSQueueParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceParameters. 
+func (in *UpdatePipeSourceParameters) DeepCopy() *UpdatePipeSourceParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePipeSourceRabbitMQBrokerParameters) DeepCopyInto(out *UpdatePipeSourceRabbitMQBrokerParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(MQBrokerAccessCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceRabbitMQBrokerParameters. +func (in *UpdatePipeSourceRabbitMQBrokerParameters) DeepCopy() *UpdatePipeSourceRabbitMQBrokerParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceRabbitMQBrokerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePipeSourceSQSQueueParameters) DeepCopyInto(out *UpdatePipeSourceSQSQueueParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceSQSQueueParameters. 
+func (in *UpdatePipeSourceSQSQueueParameters) DeepCopy() *UpdatePipeSourceSQSQueueParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceSQSQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePipeSourceSelfManagedKafkaParameters) DeepCopyInto(out *UpdatePipeSourceSelfManagedKafkaParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(int64) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(SelfManagedKafkaAccessConfigurationCredentials) + (*in).DeepCopyInto(*out) + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(int64) + **out = **in + } + if in.ServerRootCaCertificate != nil { + in, out := &in.ServerRootCaCertificate, &out.ServerRootCaCertificate + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(SelfManagedKafkaAccessConfigurationVPC) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePipeSourceSelfManagedKafkaParameters. +func (in *UpdatePipeSourceSelfManagedKafkaParameters) DeepCopy() *UpdatePipeSourceSelfManagedKafkaParameters { + if in == nil { + return nil + } + out := new(UpdatePipeSourceSelfManagedKafkaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationExceptionField) DeepCopyInto(out *ValidationExceptionField) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationExceptionField. +func (in *ValidationExceptionField) DeepCopy() *ValidationExceptionField { + if in == nil { + return nil + } + out := new(ValidationExceptionField) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 0cc2b62..8b911ec 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -34,6 +34,8 @@ import ( svctypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" svcresource "github.com/aws-controllers-k8s/pipes-controller/pkg/resource" + _ "github.com/aws-controllers-k8s/pipes-controller/pkg/resource/pipe" + "github.com/aws-controllers-k8s/pipes-controller/pkg/version" ) diff --git a/config/crd/bases/pipes.services.k8s.aws_pipes.yaml b/config/crd/bases/pipes.services.k8s.aws_pipes.yaml new file mode 100644 index 0000000..7b2cfc6 --- /dev/null +++ b/config/crd/bases/pipes.services.k8s.aws_pipes.yaml @@ -0,0 +1,874 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: pipes.pipes.services.k8s.aws +spec: + group: pipes.services.k8s.aws + names: + kind: Pipe + listKind: PipeList + plural: pipes + singular: pipe + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ackResourceMetadata.arn + name: ARN + priority: 1 + type: string + - jsonPath: .spec.source + name: SOURCE + priority: 1 + type: string + - jsonPath: .status.currentState + name: STATE + type: string + - jsonPath: .spec.target + name: TARGET + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - 
jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Pipe is the Schema for the Pipes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "PipeSpec defines the desired state of Pipe. \n An object + that represents a pipe. Amazon EventBridgePipes connect event sources + to targets and reduces the need for specialized knowledge and integration + code." + properties: + description: + description: A description of the pipe. + type: string + desiredState: + description: The state the pipe should be in. + type: string + enrichment: + description: The ARN of the enrichment resource. + type: string + enrichmentParameters: + description: The parameters required to set up enrichment on your + pipe. + properties: + httpParameters: + description: These are custom parameter to be used when the target + is an API Gateway REST APIs or EventBridge ApiDestinations. + In the latter case, these are merged with any InvocationParameters + specified on the Connection, with any values from the Connection + taking precedence. 
+ properties: + headerParameters: + additionalProperties: + type: string + type: object + pathParameterValues: + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + type: object + type: object + inputTemplate: + type: string + type: object + name: + description: The name of the pipe. + type: string + roleARN: + description: The ARN of the role that allows the pipe to send data + to the target. + type: string + source: + description: The ARN of the source resource. + type: string + sourceParameters: + description: The parameters required to set up a source for your pipe. + properties: + activeMQBrokerParameters: + description: The parameters for using an Active MQ broker as a + source. + properties: + batchSize: + format: int64 + type: integer + credentials: + description: The Secrets Manager secret that stores your broker + credentials. + properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + queueName: + type: string + type: object + dynamoDBStreamParameters: + description: The parameters for using a DynamoDB stream as a source. + properties: + batchSize: + format: int64 + type: integer + deadLetterConfig: + description: A DeadLetterConfig object that contains information + about a dead-letter queue configuration. + properties: + arn: + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + maximumRecordAgeInSeconds: + format: int64 + type: integer + maximumRetryAttempts: + format: int64 + type: integer + onPartialBatchItemFailure: + type: string + parallelizationFactor: + format: int64 + type: integer + startingPosition: + type: string + type: object + filterCriteria: + description: The collection of event patterns used to filter events. 
+ For more information, see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + in the Amazon EventBridge User Guide. + properties: + filters: + items: + description: Filter events using an event pattern. For more + information, see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + in the Amazon EventBridge User Guide. + properties: + pattern: + type: string + type: object + type: array + type: object + kinesisStreamParameters: + description: The parameters for using a Kinesis stream as a source. + properties: + batchSize: + format: int64 + type: integer + deadLetterConfig: + description: A DeadLetterConfig object that contains information + about a dead-letter queue configuration. + properties: + arn: + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + maximumRecordAgeInSeconds: + format: int64 + type: integer + maximumRetryAttempts: + format: int64 + type: integer + onPartialBatchItemFailure: + type: string + parallelizationFactor: + format: int64 + type: integer + startingPosition: + type: string + startingPositionTimestamp: + format: date-time + type: string + type: object + managedStreamingKafkaParameters: + description: The parameters for using an MSK stream as a source. + properties: + batchSize: + format: int64 + type: integer + consumerGroupID: + type: string + credentials: + description: The Secrets Manager secret that stores your stream + credentials. 
+ properties: + clientCertificateTLSAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM512Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + startingPosition: + type: string + topicName: + type: string + type: object + rabbitMQBrokerParameters: + description: The parameters for using a Rabbit MQ broker as a + source. + properties: + batchSize: + format: int64 + type: integer + credentials: + description: The Secrets Manager secret that stores your broker + credentials. + properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + queueName: + type: string + virtualHost: + type: string + type: object + selfManagedKafkaParameters: + description: The parameters for using a self-managed Apache Kafka + stream as a source. + properties: + additionalBootstrapServers: + items: + type: string + type: array + batchSize: + format: int64 + type: integer + consumerGroupID: + type: string + credentials: + description: The Secrets Manager secret that stores your stream + credentials. 
+ properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + clientCertificateTLSAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM256Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM512Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + serverRootCaCertificate: + description: // Optional SecretManager ARN which stores the + database credentials + type: string + startingPosition: + type: string + topicName: + type: string + vpc: + description: This structure specifies the VPC subnets and + security groups for the stream, and whether a public IP + address is to be used. + properties: + securityGroup: + description: List of SecurityGroupId. + items: + type: string + type: array + subnets: + description: List of SubnetId. + items: + type: string + type: array + type: object + type: object + sqsQueueParameters: + description: The parameters for using a Amazon SQS stream as a + source. + properties: + batchSize: + format: int64 + type: integer + maximumBatchingWindowInSeconds: + format: int64 + type: integer + type: object + type: object + tags: + additionalProperties: + type: string + description: The list of key-value pairs to associate with the pipe. + type: object + target: + description: The ARN of the target resource. + type: string + targetParameters: + description: The parameters required to set up a target for your pipe. + properties: + batchJobParameters: + description: The parameters for using an Batch job as a target. + properties: + arrayProperties: + description: The array properties for the submitted job, such + as the size of the array. The array size can be between + 2 and 10,000. 
If you specify array properties for a job, + it becomes an array job. This parameter is used only if + the target is an Batch job. + properties: + size: + format: int64 + type: integer + type: object + containerOverrides: + description: The overrides that are sent to a container. + properties: + command: + items: + type: string + type: array + environment: + items: + description: "The environment variables to send to the + container. You can add new environment variables, + which are added to the container at launch, or you + can override the existing environment variables from + the Docker image or the task definition. \n Environment + variables cannot start with \"Batch\". This naming + convention is reserved for variables that Batch sets." + properties: + name: + type: string + value: + type: string + type: object + type: array + instanceType: + type: string + resourceRequirements: + items: + description: The type and amount of a resource to assign + to a container. The supported resources include GPU, + MEMORY, and VCPU. + properties: + type_: + type: string + value: + type: string + type: object + type: array + type: object + dependsOn: + items: + description: An object that represents an Batch job dependency. + properties: + jobID: + type: string + type_: + type: string + type: object + type: array + jobDefinition: + type: string + jobName: + type: string + parameters: + additionalProperties: + type: string + type: object + retryStrategy: + description: The retry strategy that's associated with a job. + For more information, see Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) + in the Batch User Guide. + properties: + attempts: + format: int64 + type: integer + type: object + type: object + cloudWatchLogsParameters: + description: The parameters for using an CloudWatch Logs log stream + as a target. 
+ properties: + logStreamName: + type: string + timestamp: + type: string + type: object + ecsTaskParameters: + description: The parameters for using an Amazon ECS task as a + target. + properties: + capacityProviderStrategy: + items: + description: The details of a capacity provider strategy. + To learn more, see CapacityProviderStrategyItem (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CapacityProviderStrategyItem.html) + in the Amazon ECS API Reference. + properties: + base: + format: int64 + type: integer + capacityProvider: + type: string + weight: + format: int64 + type: integer + type: object + type: array + enableECSManagedTags: + type: boolean + enableExecuteCommand: + type: boolean + group: + type: string + launchType: + type: string + networkConfiguration: + description: This structure specifies the network configuration + for an Amazon ECS task. + properties: + awsVPCConfiguration: + description: This structure specifies the VPC subnets + and security groups for the task, and whether a public + IP address is to be used. This structure is relevant + only for ECS tasks that use the awsvpc network mode. + properties: + assignPublicIP: + type: string + securityGroups: + items: + type: string + type: array + subnets: + items: + type: string + type: array + type: object + type: object + overrides: + description: The overrides that are associated with a task. + properties: + containerOverrides: + items: + description: 'The overrides that are sent to a container. + An empty container override can be passed in. An example + of an empty container override is {"containerOverrides": + [ ] }. If a non-empty container override is specified, + the name parameter must be included.' + properties: + command: + items: + type: string + type: array + cpu: + format: int64 + type: integer + environment: + items: + description: The environment variables to send + to the container. 
You can add new environment + variables, which are added to the container + at launch, or you can override the existing + environment variables from the Docker image + or the task definition. You must also specify + a container name. + properties: + name: + type: string + value: + type: string + type: object + type: array + environmentFiles: + items: + description: "A list of files containing the environment + variables to pass to a container. You can specify + up to ten environment files. The file must have + a .env file extension. Each line in an environment + file should contain an environment variable + in VARIABLE=VALUE format. Lines beginning with + # are treated as comments and are ignored. For + more information about the environment variable + file syntax, see Declare default environment + variables in file (https://docs.docker.com/compose/env-file/). + \n If there are environment variables specified + using the environment parameter in a container + definition, they take precedence over the variables + contained within an environment file. If multiple + environment files are specified that contain + the same variable, they're processed from the + top down. We recommend that you use unique variable + names. For more information, see Specifying + environment variables (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) + in the Amazon Elastic Container Service Developer + Guide. \n This parameter is only supported for + tasks hosted on Fargate using the following + platform versions: \n * Linux platform version + 1.4.0 or later. \n * Windows platform version + 1.0.0 or later." + properties: + type_: + type: string + value: + type: string + type: object + type: array + memory: + format: int64 + type: integer + memoryReservation: + format: int64 + type: integer + name: + type: string + resourceRequirements: + items: + description: The type and amount of a resource + to assign to a container. 
The supported resource + types are GPUs and Elastic Inference accelerators. + For more information, see Working with GPUs + on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) + or Working with Amazon Elastic Inference on + Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html) + in the Amazon Elastic Container Service Developer + Guide + properties: + type_: + type: string + value: + type: string + type: object + type: array + type: object + type: array + cpu: + type: string + ephemeralStorage: + description: "The amount of ephemeral storage to allocate + for the task. This parameter is used to expand the total + amount of ephemeral storage available, beyond the default + amount, for tasks hosted on Fargate. For more information, + see Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + in the Amazon ECS User Guide for Fargate. \n This parameter + is only supported for tasks hosted on Fargate using + Linux platform version 1.4.0 or later. This parameter + is not supported for Windows containers on Fargate." + properties: + sizeInGiB: + format: int64 + type: integer + type: object + executionRoleARN: + type: string + inferenceAcceleratorOverrides: + items: + description: Details on an Elastic Inference accelerator + task override. This parameter is used to override + the Elastic Inference accelerator specified in the + task definition. For more information, see Working + with Amazon Elastic Inference on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html) + in the Amazon Elastic Container Service Developer + Guide. + properties: + deviceName: + type: string + deviceType: + type: string + type: object + type: array + memory: + type: string + taskRoleARN: + type: string + type: object + placementConstraints: + items: + description: An object representing a constraint on task + placement. 
To learn more, see Task Placement Constraints + (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + expression: + type: string + type_: + type: string + type: object + type: array + placementStrategy: + items: + description: The task placement strategy for a task or service. + To learn more, see Task Placement Strategies (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) + in the Amazon Elastic Container Service Service Developer + Guide. + properties: + field: + type: string + type_: + type: string + type: object + type: array + platformVersion: + type: string + propagateTags: + type: string + referenceID: + type: string + tags: + items: + description: A key-value pair associated with an Amazon + Web Services resource. In EventBridge, rules and event + buses support tagging. + properties: + key: + type: string + value: + type: string + type: object + type: array + taskCount: + format: int64 + type: integer + taskDefinitionARN: + type: string + type: object + eventBridgeEventBusParameters: + description: The parameters for using an EventBridge event bus + as a target. + properties: + detailType: + type: string + endpointID: + type: string + resources: + items: + type: string + type: array + source: + type: string + time: + type: string + type: object + httpParameters: + description: These are custom parameter to be used when the target + is an API Gateway REST APIs or EventBridge ApiDestinations. + properties: + headerParameters: + additionalProperties: + type: string + type: object + pathParameterValues: + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + type: object + type: object + inputTemplate: + type: string + kinesisStreamParameters: + description: The parameters for using a Kinesis stream as a source. 
+ properties: + partitionKey: + type: string + type: object + lambdaFunctionParameters: + description: The parameters for using a Lambda function as a target. + properties: + invocationType: + type: string + type: object + redshiftDataParameters: + description: These are custom parameters to be used when the target + is a Amazon Redshift cluster to invoke the Amazon Redshift Data + API ExecuteStatement. + properties: + database: + description: // Redshift Database + type: string + dbUser: + description: // Database user name + type: string + secretManagerARN: + description: // For targets, can either specify an ARN or + a jsonpath pointing to the ARN. + type: string + sqls: + description: // A list of SQLs. + items: + type: string + type: array + statementName: + description: // A name for Redshift DataAPI statement which + can be used as filter of // ListStatement. + type: string + withEvent: + type: boolean + type: object + sageMakerPipelineParameters: + description: The parameters for using a SageMaker pipeline as + a target. + properties: + pipelineParameterList: + items: + description: Name/Value pair of a parameter to start execution + of a SageMaker Model Building Pipeline. + properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + sqsQueueParameters: + description: The parameters for using a Amazon SQS stream as a + source. + properties: + messageDeduplicationID: + type: string + messageGroupID: + type: string + type: object + stepFunctionStateMachineParameters: + description: The parameters for using a Step Functions state machine + as a target. 
+ properties: + invocationType: + type: string + type: object + type: object + required: + - name + - roleARN + - source + - target + type: object + status: + description: PipeStatus defines the observed state of Pipe + properties: + ackResourceMetadata: + description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` + member that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: 'ARN is the Amazon Resource Name for the resource. + This is a globally-unique identifier and is set only by the + ACK service controller once the controller has orchestrated + the creation of the resource OR when it has verified that an + "adopted" resource (a resource where the ARN annotation was + set by the Kubernetes user on the CR) exists and matches the + supplied CR''s Spec field values. TODO(vijat@): Find a better + strategy for resources that do not have ARN in CreateOutputResponse + https://github.com/aws/aws-controllers-k8s/issues/270' + type: string + ownerAccountID: + description: OwnerAccountID is the AWS Account ID of the account + that owns the backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: All CRS managed by ACK have a common `Status.Conditions` + member that contains a collection of `ackv1alpha1.Condition` objects + that describe the various terminal states of the CR and its backend + AWS service API resource + items: + description: Condition is the common struct used by all CRDs managed + by ACK service controllers to indicate terminal states of the + CR and its backend AWS service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. 
+ format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + creationTime: + description: The time the pipe was created. + format: date-time + type: string + currentState: + description: The state the pipe is in. + type: string + lastModifiedTime: + description: When the pipe was last updated, in ISO-8601 format (https://www.w3.org/TR/NOTE-datetime) + (YYYY-MM-DDThh:mm:ss.sTZD). + format: date-time + type: string + stateReason: + description: The reason the pipe is in its current state. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index ea0b795..a0588ee 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,3 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - common + - bases/pipes.services.k8s.aws_pipes.yaml diff --git a/config/rbac/cluster-role-controller.yaml b/config/rbac/cluster-role-controller.yaml index 02303fd..a3198b1 100644 --- a/config/rbac/cluster-role-controller.yaml +++ b/config/rbac/cluster-role-controller.yaml @@ -31,6 +31,26 @@ rules: - list - patch - watch +- apiGroups: + - pipes.services.k8s.aws + resources: + - pipes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - pipes.services.k8s.aws + resources: + - pipes/status + verbs: + - get + - patch + - update - apiGroups: - services.k8s.aws resources: diff --git a/config/rbac/role-reader.yaml b/config/rbac/role-reader.yaml index 5f917b7..808e605 100644 --- 
a/config/rbac/role-reader.yaml +++ b/config/rbac/role-reader.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - pipes verbs: - get - list diff --git a/config/rbac/role-writer.yaml b/config/rbac/role-writer.yaml index ff84cd9..7787d2d 100644 --- a/config/rbac/role-writer.yaml +++ b/config/rbac/role-writer.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - pipes verbs: - create - delete @@ -20,6 +21,7 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - pipes verbs: - get - patch diff --git a/generator.yaml b/generator.yaml index 55502e1..d66bcda 100644 --- a/generator.yaml +++ b/generator.yaml @@ -1,5 +1,61 @@ ignore: resource_names: - - Pipe + # - Pipe sdk_names: - model: pipes \ No newline at end of file + client_struct: Pipes + client_interface: PipesAPI +resources: + Pipe: + fields: + DesiredState: + compare: + is_ignored: true + SourceParameters: + compare: + is_ignored: true + TargetParameters: + compare: + is_ignored: true + EnrichmentParameters: + compare: + is_ignored: true + StateReason: + is_read_only: true + from: + operation: DescribePipe + path: StateReason + hooks: + delta_pre_compare: + code: customPreCompare(delta, a, b) + sdk_create_post_set_output: + template_path: hooks/pipe/sdk_create_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/pipe/sdk_update_pre_build_request.go.tpl + sdk_update_pre_set_output: + template_path: hooks/pipe/sdk_update_pre_set_output.go.tpl + sdk_update_post_build_request: + template_path: hooks/pipe/sdk_update_post_build_request.go.tpl + sdk_delete_post_request: + template_path: hooks/pipe/sdk_delete_post_request.go.tpl + print: + add_age_column: true + add_synced_column: true + additional_columns: + - name: ARN + json_path: .status.ackResourceMetadata.arn + type: string + priority: 1 + - name: SOURCE + json_path: .spec.source + type: string + priority: 1 + - name: TARGET + json_path: .spec.target + type: string + priority: 1 
+ - name: STATE + json_path: .status.currentState + type: string + exceptions: + terminal_codes: + - ValidationException diff --git a/go.mod b/go.mod index 10b00c5..c9a28d5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,9 @@ go 1.19 require ( github.com/aws-controllers-k8s/runtime v0.24.1 github.com/aws/aws-sdk-go v1.44.218 + github.com/go-logr/logr v1.2.3 github.com/spf13/pflag v1.0.5 + k8s.io/api v0.26.1 k8s.io/apimachinery v0.26.1 k8s.io/client-go v0.26.1 sigs.k8s.io/controller-runtime v0.14.5 @@ -19,7 +21,6 @@ require ( github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect @@ -65,7 +66,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.26.1 // indirect k8s.io/apiextensions-apiserver v0.26.1 // indirect k8s.io/component-base v0.26.1 // indirect k8s.io/klog/v2 v2.80.1 // indirect diff --git a/helm/Chart.yaml b/helm/Chart.yaml index 5a1c734..6dac4dd 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: pipes-chart -description: A Helm chart for the ACK service controller for () +description: A Helm chart for the ACK service controller for Amazon EventBridge Pipes (Pipes) version: v0.0.0-non-release-version appVersion: v0.0.0-non-release-version home: https://github.com/aws-controllers-k8s/pipes-controller @@ -10,7 +10,7 @@ sources: maintainers: - name: ACK Admins url: https://github.com/orgs/aws-controllers-k8s/teams/ack-admin - - name: Admins + - name: Pipes Admins url: https://github.com/orgs/aws-controllers-k8s/teams/pipes-maintainer keywords: - aws diff --git a/helm/crds/pipes.services.k8s.aws_pipes.yaml b/helm/crds/pipes.services.k8s.aws_pipes.yaml new 
file mode 100644 index 0000000..ed564a7 --- /dev/null +++ b/helm/crds/pipes.services.k8s.aws_pipes.yaml @@ -0,0 +1,874 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: pipes.pipes.services.k8s.aws +spec: + group: pipes.services.k8s.aws + names: + kind: Pipe + listKind: PipeList + plural: pipes + singular: pipe + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ackResourceMetadata.arn + name: ARN + priority: 1 + type: string + - jsonPath: .spec.source + name: SOURCE + priority: 1 + type: string + - jsonPath: .status.currentState + name: STATE + type: string + - jsonPath: .spec.target + name: TARGET + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Pipe is the Schema for the Pipes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: "PipeSpec defines the desired state of Pipe. \n An object + that represents a pipe. 
Amazon EventBridgePipes connect event sources + to targets and reduces the need for specialized knowledge and integration + code." + properties: + description: + description: A description of the pipe. + type: string + desiredState: + description: The state the pipe should be in. + type: string + enrichment: + description: The ARN of the enrichment resource. + type: string + enrichmentParameters: + description: The parameters required to set up enrichment on your + pipe. + properties: + httpParameters: + description: These are custom parameter to be used when the target + is an API Gateway REST APIs or EventBridge ApiDestinations. + In the latter case, these are merged with any InvocationParameters + specified on the Connection, with any values from the Connection + taking precedence. + properties: + headerParameters: + additionalProperties: + type: string + type: object + pathParameterValues: + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + type: object + type: object + inputTemplate: + type: string + type: object + name: + description: The name of the pipe. + type: string + roleARN: + description: The ARN of the role that allows the pipe to send data + to the target. + type: string + source: + description: The ARN of the source resource. + type: string + sourceParameters: + description: The parameters required to set up a source for your pipe. + properties: + activeMQBrokerParameters: + description: The parameters for using an Active MQ broker as a + source. + properties: + batchSize: + format: int64 + type: integer + credentials: + description: The Secrets Manager secret that stores your broker + credentials. 
+ properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + queueName: + type: string + type: object + dynamoDBStreamParameters: + description: The parameters for using a DynamoDB stream as a source. + properties: + batchSize: + format: int64 + type: integer + deadLetterConfig: + description: A DeadLetterConfig object that contains information + about a dead-letter queue configuration. + properties: + arn: + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + maximumRecordAgeInSeconds: + format: int64 + type: integer + maximumRetryAttempts: + format: int64 + type: integer + onPartialBatchItemFailure: + type: string + parallelizationFactor: + format: int64 + type: integer + startingPosition: + type: string + type: object + filterCriteria: + description: The collection of event patterns used to filter events. + For more information, see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + in the Amazon EventBridge User Guide. + properties: + filters: + items: + description: Filter events using an event pattern. For more + information, see Events and Event Patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) + in the Amazon EventBridge User Guide. + properties: + pattern: + type: string + type: object + type: array + type: object + kinesisStreamParameters: + description: The parameters for using a Kinesis stream as a source. + properties: + batchSize: + format: int64 + type: integer + deadLetterConfig: + description: A DeadLetterConfig object that contains information + about a dead-letter queue configuration. 
+ properties: + arn: + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + maximumRecordAgeInSeconds: + format: int64 + type: integer + maximumRetryAttempts: + format: int64 + type: integer + onPartialBatchItemFailure: + type: string + parallelizationFactor: + format: int64 + type: integer + startingPosition: + type: string + startingPositionTimestamp: + format: date-time + type: string + type: object + managedStreamingKafkaParameters: + description: The parameters for using an MSK stream as a source. + properties: + batchSize: + format: int64 + type: integer + consumerGroupID: + type: string + credentials: + description: The Secrets Manager secret that stores your stream + credentials. + properties: + clientCertificateTLSAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM512Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + startingPosition: + type: string + topicName: + type: string + type: object + rabbitMQBrokerParameters: + description: The parameters for using a Rabbit MQ broker as a + source. + properties: + batchSize: + format: int64 + type: integer + credentials: + description: The Secrets Manager secret that stores your broker + credentials. + properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + queueName: + type: string + virtualHost: + type: string + type: object + selfManagedKafkaParameters: + description: The parameters for using a self-managed Apache Kafka + stream as a source. 
+ properties: + additionalBootstrapServers: + items: + type: string + type: array + batchSize: + format: int64 + type: integer + consumerGroupID: + type: string + credentials: + description: The Secrets Manager secret that stores your stream + credentials. + properties: + basicAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + clientCertificateTLSAuth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM256Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + saslSCRAM512Auth: + description: // Optional SecretManager ARN which stores + the database credentials + type: string + type: object + maximumBatchingWindowInSeconds: + format: int64 + type: integer + serverRootCaCertificate: + description: // Optional SecretManager ARN which stores the + database credentials + type: string + startingPosition: + type: string + topicName: + type: string + vpc: + description: This structure specifies the VPC subnets and + security groups for the stream, and whether a public IP + address is to be used. + properties: + securityGroup: + description: List of SecurityGroupId. + items: + type: string + type: array + subnets: + description: List of SubnetId. + items: + type: string + type: array + type: object + type: object + sqsQueueParameters: + description: The parameters for using a Amazon SQS stream as a + source. + properties: + batchSize: + format: int64 + type: integer + maximumBatchingWindowInSeconds: + format: int64 + type: integer + type: object + type: object + tags: + additionalProperties: + type: string + description: The list of key-value pairs to associate with the pipe. + type: object + target: + description: The ARN of the target resource. + type: string + targetParameters: + description: The parameters required to set up a target for your pipe. 
+ properties: + batchJobParameters: + description: The parameters for using an Batch job as a target. + properties: + arrayProperties: + description: The array properties for the submitted job, such + as the size of the array. The array size can be between + 2 and 10,000. If you specify array properties for a job, + it becomes an array job. This parameter is used only if + the target is an Batch job. + properties: + size: + format: int64 + type: integer + type: object + containerOverrides: + description: The overrides that are sent to a container. + properties: + command: + items: + type: string + type: array + environment: + items: + description: "The environment variables to send to the + container. You can add new environment variables, + which are added to the container at launch, or you + can override the existing environment variables from + the Docker image or the task definition. \n Environment + variables cannot start with \"Batch\". This naming + convention is reserved for variables that Batch sets." + properties: + name: + type: string + value: + type: string + type: object + type: array + instanceType: + type: string + resourceRequirements: + items: + description: The type and amount of a resource to assign + to a container. The supported resources include GPU, + MEMORY, and VCPU. + properties: + type_: + type: string + value: + type: string + type: object + type: array + type: object + dependsOn: + items: + description: An object that represents an Batch job dependency. + properties: + jobID: + type: string + type_: + type: string + type: object + type: array + jobDefinition: + type: string + jobName: + type: string + parameters: + additionalProperties: + type: string + type: object + retryStrategy: + description: The retry strategy that's associated with a job. + For more information, see Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) + in the Batch User Guide. 
+ properties: + attempts: + format: int64 + type: integer + type: object + type: object + cloudWatchLogsParameters: + description: The parameters for using an CloudWatch Logs log stream + as a target. + properties: + logStreamName: + type: string + timestamp: + type: string + type: object + ecsTaskParameters: + description: The parameters for using an Amazon ECS task as a + target. + properties: + capacityProviderStrategy: + items: + description: The details of a capacity provider strategy. + To learn more, see CapacityProviderStrategyItem (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CapacityProviderStrategyItem.html) + in the Amazon ECS API Reference. + properties: + base: + format: int64 + type: integer + capacityProvider: + type: string + weight: + format: int64 + type: integer + type: object + type: array + enableECSManagedTags: + type: boolean + enableExecuteCommand: + type: boolean + group: + type: string + launchType: + type: string + networkConfiguration: + description: This structure specifies the network configuration + for an Amazon ECS task. + properties: + awsVPCConfiguration: + description: This structure specifies the VPC subnets + and security groups for the task, and whether a public + IP address is to be used. This structure is relevant + only for ECS tasks that use the awsvpc network mode. + properties: + assignPublicIP: + type: string + securityGroups: + items: + type: string + type: array + subnets: + items: + type: string + type: array + type: object + type: object + overrides: + description: The overrides that are associated with a task. + properties: + containerOverrides: + items: + description: 'The overrides that are sent to a container. + An empty container override can be passed in. An example + of an empty container override is {"containerOverrides": + [ ] }. If a non-empty container override is specified, + the name parameter must be included.' 
+ properties: + command: + items: + type: string + type: array + cpu: + format: int64 + type: integer + environment: + items: + description: The environment variables to send + to the container. You can add new environment + variables, which are added to the container + at launch, or you can override the existing + environment variables from the Docker image + or the task definition. You must also specify + a container name. + properties: + name: + type: string + value: + type: string + type: object + type: array + environmentFiles: + items: + description: "A list of files containing the environment + variables to pass to a container. You can specify + up to ten environment files. The file must have + a .env file extension. Each line in an environment + file should contain an environment variable + in VARIABLE=VALUE format. Lines beginning with + # are treated as comments and are ignored. For + more information about the environment variable + file syntax, see Declare default environment + variables in file (https://docs.docker.com/compose/env-file/). + \n If there are environment variables specified + using the environment parameter in a container + definition, they take precedence over the variables + contained within an environment file. If multiple + environment files are specified that contain + the same variable, they're processed from the + top down. We recommend that you use unique variable + names. For more information, see Specifying + environment variables (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) + in the Amazon Elastic Container Service Developer + Guide. \n This parameter is only supported for + tasks hosted on Fargate using the following + platform versions: \n - Linux platform version + 1.4.0 or later. \n - Windows platform version + 1.0.0 or later." 
+ properties: + type_: + type: string + value: + type: string + type: object + type: array + memory: + format: int64 + type: integer + memoryReservation: + format: int64 + type: integer + name: + type: string + resourceRequirements: + items: + description: The type and amount of a resource + to assign to a container. The supported resource + types are GPUs and Elastic Inference accelerators. + For more information, see Working with GPUs + on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html) + or Working with Amazon Elastic Inference on + Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html) + in the Amazon Elastic Container Service Developer + Guide + properties: + type_: + type: string + value: + type: string + type: object + type: array + type: object + type: array + cpu: + type: string + ephemeralStorage: + description: "The amount of ephemeral storage to allocate + for the task. This parameter is used to expand the total + amount of ephemeral storage available, beyond the default + amount, for tasks hosted on Fargate. For more information, + see Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + in the Amazon ECS User Guide for Fargate. \n This parameter + is only supported for tasks hosted on Fargate using + Linux platform version 1.4.0 or later. This parameter + is not supported for Windows containers on Fargate." + properties: + sizeInGiB: + format: int64 + type: integer + type: object + executionRoleARN: + type: string + inferenceAcceleratorOverrides: + items: + description: Details on an Elastic Inference accelerator + task override. This parameter is used to override + the Elastic Inference accelerator specified in the + task definition. 
For more information, see Working + with Amazon Elastic Inference on Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html) + in the Amazon Elastic Container Service Developer + Guide. + properties: + deviceName: + type: string + deviceType: + type: string + type: object + type: array + memory: + type: string + taskRoleARN: + type: string + type: object + placementConstraints: + items: + description: An object representing a constraint on task + placement. To learn more, see Task Placement Constraints + (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + expression: + type: string + type_: + type: string + type: object + type: array + placementStrategy: + items: + description: The task placement strategy for a task or service. + To learn more, see Task Placement Strategies (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) + in the Amazon Elastic Container Service Service Developer + Guide. + properties: + field: + type: string + type_: + type: string + type: object + type: array + platformVersion: + type: string + propagateTags: + type: string + referenceID: + type: string + tags: + items: + description: A key-value pair associated with an Amazon + Web Services resource. In EventBridge, rules and event + buses support tagging. + properties: + key: + type: string + value: + type: string + type: object + type: array + taskCount: + format: int64 + type: integer + taskDefinitionARN: + type: string + type: object + eventBridgeEventBusParameters: + description: The parameters for using an EventBridge event bus + as a target. 
+ properties: + detailType: + type: string + endpointID: + type: string + resources: + items: + type: string + type: array + source: + type: string + time: + type: string + type: object + httpParameters: + description: These are custom parameter to be used when the target + is an API Gateway REST APIs or EventBridge ApiDestinations. + properties: + headerParameters: + additionalProperties: + type: string + type: object + pathParameterValues: + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + type: object + type: object + inputTemplate: + type: string + kinesisStreamParameters: + description: The parameters for using a Kinesis stream as a source. + properties: + partitionKey: + type: string + type: object + lambdaFunctionParameters: + description: The parameters for using a Lambda function as a target. + properties: + invocationType: + type: string + type: object + redshiftDataParameters: + description: These are custom parameters to be used when the target + is a Amazon Redshift cluster to invoke the Amazon Redshift Data + API ExecuteStatement. + properties: + database: + description: // Redshift Database + type: string + dbUser: + description: // Database user name + type: string + secretManagerARN: + description: // For targets, can either specify an ARN or + a jsonpath pointing to the ARN. + type: string + sqls: + description: // A list of SQLs. + items: + type: string + type: array + statementName: + description: // A name for Redshift DataAPI statement which + can be used as filter of // ListStatement. + type: string + withEvent: + type: boolean + type: object + sageMakerPipelineParameters: + description: The parameters for using a SageMaker pipeline as + a target. + properties: + pipelineParameterList: + items: + description: Name/Value pair of a parameter to start execution + of a SageMaker Model Building Pipeline. 
+ properties: + name: + type: string + value: + type: string + type: object + type: array + type: object + sqsQueueParameters: + description: The parameters for using a Amazon SQS stream as a + source. + properties: + messageDeduplicationID: + type: string + messageGroupID: + type: string + type: object + stepFunctionStateMachineParameters: + description: The parameters for using a Step Functions state machine + as a target. + properties: + invocationType: + type: string + type: object + type: object + required: + - name + - roleARN + - source + - target + type: object + status: + description: PipeStatus defines the observed state of Pipe + properties: + ackResourceMetadata: + description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` + member that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: 'ARN is the Amazon Resource Name for the resource. + This is a globally-unique identifier and is set only by the + ACK service controller once the controller has orchestrated + the creation of the resource OR when it has verified that an + "adopted" resource (a resource where the ARN annotation was + set by the Kubernetes user on the CR) exists and matches the + supplied CR''s Spec field values. TODO(vijat@): Find a better + strategy for resources that do not have ARN in CreateOutputResponse + https://github.com/aws/aws-controllers-k8s/issues/270' + type: string + ownerAccountID: + description: OwnerAccountID is the AWS Account ID of the account + that owns the backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. 
+ type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: All CRS managed by ACK have a common `Status.Conditions` + member that contains a collection of `ackv1alpha1.Condition` objects + that describe the various terminal states of the CR and its backend + AWS service API resource + items: + description: Condition is the common struct used by all CRDs managed + by ACK service controllers to indicate terminal states of the + CR and its backend AWS service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + creationTime: + description: The time the pipe was created. + format: date-time + type: string + currentState: + description: The state the pipe is in. + type: string + lastModifiedTime: + description: When the pipe was last updated, in ISO-8601 format (https://www.w3.org/TR/NOTE-datetime) + (YYYY-MM-DDThh:mm:ss.sTZD). + format: date-time + type: string + stateReason: + description: The reason the pipe is in its current state. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt index d32049d..bf11a6c 100644 --- a/helm/templates/NOTES.txt +++ b/helm/templates/NOTES.txt @@ -4,7 +4,7 @@ This chart deploys "public.ecr.aws/aws-controllers-k8s/pipes-controller:v0.0.0-n Check its status by running: kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" -You are now able to create () resources! +You are now able to create Amazon EventBridge Pipes (Pipes) resources! The controller is running in "{{ .Values.installScope }}" mode. The controller is configured to manage AWS resources in region: "{{ .Values.aws.region }}" diff --git a/helm/templates/cluster-role-controller.yaml b/helm/templates/cluster-role-controller.yaml index 539f4d9..3d6ea1b 100644 --- a/helm/templates/cluster-role-controller.yaml +++ b/helm/templates/cluster-role-controller.yaml @@ -46,6 +46,26 @@ rules: - list - patch - watch +- apiGroups: + - pipes.services.k8s.aws + resources: + - pipes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - pipes.services.k8s.aws + resources: + - pipes/status + verbs: + - get + - patch + - update - apiGroups: - services.k8s.aws resources: diff --git a/helm/templates/role-reader.yaml b/helm/templates/role-reader.yaml index 18104de..246958b 100644 --- a/helm/templates/role-reader.yaml +++ b/helm/templates/role-reader.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - pipes verbs: - get - list diff --git a/helm/templates/role-writer.yaml b/helm/templates/role-writer.yaml index 7ef8f77..7c232ca 100644 --- a/helm/templates/role-writer.yaml +++ b/helm/templates/role-writer.yaml @@ -9,6 +9,8 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - pipes + verbs: - create - delete @@ -20,6 +22,7 @@ rules: - apiGroups: - pipes.services.k8s.aws resources: + - 
pipes verbs: - get - patch diff --git a/metadata.yaml b/metadata.yaml index ba0d580..2b5bc5f 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -1,8 +1,8 @@ service: - full_name: - short_name: - link: - documentation: + full_name: "Amazon EventBridge Pipes" + short_name: "Pipes" + link: "https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html" + documentation: "https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/Welcome.html" api_versions: - api_version: v1alpha1 status: available diff --git a/pkg/resource/pipe/delta.go b/pkg/resource/pipe/delta.go new file mode 100644 index 0000000..05582de --- /dev/null +++ b/pkg/resource/pipe/delta.go @@ -0,0 +1,94 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... 
+var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + customPreCompare(delta, a, b) + + if ackcompare.HasNilDifference(a.ko.Spec.Description, b.ko.Spec.Description) { + delta.Add("Spec.Description", a.ko.Spec.Description, b.ko.Spec.Description) + } else if a.ko.Spec.Description != nil && b.ko.Spec.Description != nil { + if *a.ko.Spec.Description != *b.ko.Spec.Description { + delta.Add("Spec.Description", a.ko.Spec.Description, b.ko.Spec.Description) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Enrichment, b.ko.Spec.Enrichment) { + delta.Add("Spec.Enrichment", a.ko.Spec.Enrichment, b.ko.Spec.Enrichment) + } else if a.ko.Spec.Enrichment != nil && b.ko.Spec.Enrichment != nil { + if *a.ko.Spec.Enrichment != *b.ko.Spec.Enrichment { + delta.Add("Spec.Enrichment", a.ko.Spec.Enrichment, b.ko.Spec.Enrichment) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Name, b.ko.Spec.Name) { + delta.Add("Spec.Name", a.ko.Spec.Name, b.ko.Spec.Name) + } else if a.ko.Spec.Name != nil && b.ko.Spec.Name != nil { + if *a.ko.Spec.Name != *b.ko.Spec.Name { + delta.Add("Spec.Name", a.ko.Spec.Name, b.ko.Spec.Name) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } else if a.ko.Spec.RoleARN != nil && b.ko.Spec.RoleARN != nil { + if *a.ko.Spec.RoleARN != *b.ko.Spec.RoleARN { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Source, b.ko.Spec.Source) { + delta.Add("Spec.Source", a.ko.Spec.Source, b.ko.Spec.Source) + } else if a.ko.Spec.Source != nil && b.ko.Spec.Source != nil { + if *a.ko.Spec.Source != 
*b.ko.Spec.Source { + delta.Add("Spec.Source", a.ko.Spec.Source, b.ko.Spec.Source) + } + } + if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + if ackcompare.HasNilDifference(a.ko.Spec.Target, b.ko.Spec.Target) { + delta.Add("Spec.Target", a.ko.Spec.Target, b.ko.Spec.Target) + } else if a.ko.Spec.Target != nil && b.ko.Spec.Target != nil { + if *a.ko.Spec.Target != *b.ko.Spec.Target { + delta.Add("Spec.Target", a.ko.Spec.Target, b.ko.Spec.Target) + } + } + + return delta +} diff --git a/pkg/resource/pipe/descriptor.go b/pkg/resource/pipe/descriptor.go new file mode 100644 index 0000000..9f82d61 --- /dev/null +++ b/pkg/resource/pipe/descriptor.go @@ -0,0 +1,154 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package pipe + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +const ( + finalizerString = "finalizers.pipes.services.k8s.aws/Pipe" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("pipes") + GroupKind = metav1.GroupKind{ + Group: "pipes.services.k8s.aws", + Kind: "Pipe", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the +// API Group and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { + return &GroupKind +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.Pipe{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.Pipe), + } +} + +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. 
What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) + return containsFinalizer(obj, finalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, finalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. 
+func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, finalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/pipe/hooks.go b/pkg/resource/pipe/hooks.go new file mode 100644 index 0000000..b275297 --- /dev/null +++ b/pkg/resource/pipe/hooks.go @@ -0,0 +1,1002 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package pipe + +import ( + "fmt" + "reflect" + "time" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + svcsdk "github.com/aws/aws-sdk-go/service/pipes" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +// _____ _ _____ _ _____ ____ ____ _ ____ _____ _____ +// / __// \ |\/ __// \ /|/__ __\/ _ \/ __\/ \/ _ \/ __// __/ +// | \ | | //| \ | |\ || / \ | | //| \/|| || | \|| | _| \ +// | /_ | \// | /_ | | \|| | | | |_\\| /| || |_/|| |_//| /_ +// \____\\__/ \____\\_/ \| \_/ \____/\_/\_\\_/\____/\____\\____\ +// +// _ _ _ ____ ___ __ _ ____ _ ____ _____ ____ +// \||/ / |/ _ \\ \//\||/ / __\/ \/ __\/ __// ___\ +// | || / \| \ / | \/|| || \/|| \ | \ +// /\_| || |-|| / / | __/| || __/| /_ \___ | +// \____/\_/ \|/_/ \_/ \_/\_/ \____\\____/ + +const ( + defaultRequeueDelay = time.Second * 5 +) + +var ( + requeueWaitWhileCreating = ackrequeue.NeededAfter( + fmt.Errorf("pipe in status %q, requeueing", svcsdk.PipeStateCreating), + defaultRequeueDelay, + ) + + requeueWaitWhileUpdating = ackrequeue.NeededAfter( + fmt.Errorf("pipe in status %q, cannot be modified or deleted", svcsdk.PipeStateUpdating), + defaultRequeueDelay, + ) + + requeueWaitWhileDeleting = ackrequeue.NeededAfter( + fmt.Errorf("pipe in status %q, cannot be modified or deleted", svcsdk.PipeStateDeleting), + defaultRequeueDelay, + ) +) + +// hasNilDifference returns true if the supplied subjects' nilness is +// different +func hasNilDifference(a, b interface{}) bool { + if isEmpty(a) || isEmpty(b) { + if (isEmpty(a) && isNotEmpty(b)) || (isEmpty(b) && isNotEmpty(a)) { + return true + } + } + return false +} + +// isEmpty checks the passed interface argument for Nil or empty struct value +// (with zero values). For interfaces, only 'i==nil' check is not sufficient. 
+// https://tour.golang.org/methods/12 More details: +// https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1 +func isEmpty(i interface{}) bool { + if i == nil { + return true + } + + switch reflect.TypeOf(i).Kind() { + case reflect.Ptr, reflect.Map, reflect.Array, reflect.Chan, reflect.Slice: + if reflect.ValueOf(i).IsNil() { + return true + } + + if reflect.ValueOf(i).Elem().Kind() == reflect.Struct { + return reflect.ValueOf(i).Elem().IsZero() + } + } + return false +} + +func isNotEmpty(i interface{}) bool { + return !isEmpty(i) +} + +func customPreCompare( + delta *ackcompare.Delta, + a *resource, + b *resource, +) { + aDesiredState := a.ko.Spec.DesiredState + bDesiredState := b.ko.Spec.DesiredState + + // assumes API always returns desiredState + running := aDesiredState == nil || *aDesiredState == "" || *aDesiredState == svcsdk.PipeStateRunning + if !(running && *bDesiredState == svcsdk.PipeStateRunning) { + if *aDesiredState != *bDesiredState { + delta.Add("Spec.DesiredState", aDesiredState, bDesiredState) + } + } + + // hack (by the great @a-hilaly): forces a requeue in update if currentState != desiredState to reconcile + // and update status fields in Kubernetes resource e.g., in case of UPDATE_FAILED + if *bDesiredState != *b.ko.Status.CurrentState { + // setting Spec. because Status. 
is not considered in delta logic + delta.Add("Spec.CurrentState", *bDesiredState, *b.ko.Status.CurrentState) + } + + if hasNilDifference(a.ko.Spec.SourceParameters, b.ko.Spec.SourceParameters) { + delta.Add("Spec.SourceParameters", a.ko.Spec.SourceParameters, b.ko.Spec.SourceParameters) + } else if a.ko.Spec.SourceParameters != nil && b.ko.Spec.SourceParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize != *b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.Credentials", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials, 
b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != nil { + if *a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != *b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != 
*b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName) { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.QueueName", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName) + } else if a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName != nil && b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName != nil { + if *a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName != *b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName { + delta.Add("Spec.SourceParameters.ActiveMQBrokerParameters.QueueName", a.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters, b.ko.Spec.SourceParameters.DynamoDBStreamParameters) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters", a.ko.Spec.SourceParameters.DynamoDBStreamParameters, b.ko.Spec.SourceParameters.DynamoDBStreamParameters) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.BatchSize", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize) + } else if 
a.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.BatchSize", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN, 
b.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds { + 
delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != 
*b.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != *b.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition) { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition) + } else if a.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition != nil && b.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition != nil { + if *a.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition != 
*b.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition { + delta.Add("Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition", a.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.FilterCriteria, b.ko.Spec.SourceParameters.FilterCriteria) { + delta.Add("Spec.SourceParameters.FilterCriteria", a.ko.Spec.SourceParameters.FilterCriteria, b.ko.Spec.SourceParameters.FilterCriteria) + } else if a.ko.Spec.SourceParameters.FilterCriteria != nil && b.ko.Spec.SourceParameters.FilterCriteria != nil { + if !reflect.DeepEqual(a.ko.Spec.SourceParameters.FilterCriteria.Filters, b.ko.Spec.SourceParameters.FilterCriteria.Filters) { + delta.Add("Spec.SourceParameters.FilterCriteria.Filters", a.ko.Spec.SourceParameters.FilterCriteria.Filters, b.ko.Spec.SourceParameters.FilterCriteria.Filters) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters, b.ko.Spec.SourceParameters.KinesisStreamParameters) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters", a.ko.Spec.SourceParameters.KinesisStreamParameters, b.ko.Spec.SourceParameters.KinesisStreamParameters) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize, b.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.BatchSize", a.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize, b.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize != 
*b.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.BatchSize", a.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize, b.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig, b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig", a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig, b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN, b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN", a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN, b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN != *b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN", a.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN, b.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds) { + 
delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != *b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != *b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds) + } + } + if 
hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != *b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts", a.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts, b.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure", a.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != *b.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure", a.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure, b.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure) + } + } + if 
hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor", a.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor != *b.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor", a.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor, b.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.StartingPosition", a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition != nil { + if *a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition != *b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.StartingPosition", a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition, b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp, 
b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp", a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp, b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp) + } else if a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp != nil && b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp != nil { + if !a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp.Equal(b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp) { + delta.Add("Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp", a.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp, b.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != nil { + if 
*a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID != nil { + if *a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials != nil { + if 
hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + if *a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + if 
*a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition, 
b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition != nil { + if *a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName) { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName) + } else if a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName != nil && b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName != nil { + if *a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName != *b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName { + delta.Add("Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName", a.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName, b.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters != nil && 
b.ko.Spec.SourceParameters.RabbitMQBrokerParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize != nil && b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize != *b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.Credentials", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials != nil && b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != nil && 
b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != nil { + if *a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != *b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != *b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.QueueName", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName) + } else if 
a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName != nil && b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName != nil { + if *a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName != *b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.QueueName", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost) { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost) + } else if a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost != nil && b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost != nil { + if *a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost != *b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost { + delta.Add("Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost", a.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost, b.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters != nil { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers, 
b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers) + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID { + 
delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth) { + 
delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth, 
b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != 
*b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition != nil { + if 
*a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.TopicName", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName != nil { + if *a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName != *b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.TopicName", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.VPC", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC) + } else if a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC != nil && b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC != nil { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup", 
a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup) + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets) { + delta.Add("Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets", a.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets, b.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets) + } + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SQSQueueParameters, b.ko.Spec.SourceParameters.SQSQueueParameters) { + delta.Add("Spec.SourceParameters.SQSQueueParameters", a.ko.Spec.SourceParameters.SQSQueueParameters, b.ko.Spec.SourceParameters.SQSQueueParameters) + } else if a.ko.Spec.SourceParameters.SQSQueueParameters != nil && b.ko.Spec.SourceParameters.SQSQueueParameters != nil { + if hasNilDifference(a.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize, b.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize) { + delta.Add("Spec.SourceParameters.SQSQueueParameters.BatchSize", a.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize, b.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize) + } else if a.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize != nil && b.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize != nil { + if *a.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize != *b.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize { + delta.Add("Spec.SourceParameters.SQSQueueParameters.BatchSize", a.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize, b.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize) + } + } + if hasNilDifference(a.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds) { + delta.Add("Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds", 
a.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds) + } else if a.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds != nil && b.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds != nil { + if *a.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds != *b.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds { + delta.Add("Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds", a.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds, b.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds) + } + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters, b.ko.Spec.TargetParameters) { + delta.Add("Spec.TargetParameters", a.ko.Spec.TargetParameters, b.ko.Spec.TargetParameters) + } else if a.ko.Spec.TargetParameters != nil && b.ko.Spec.TargetParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters, b.ko.Spec.TargetParameters.BatchJobParameters) { + delta.Add("Spec.TargetParameters.BatchJobParameters", a.ko.Spec.TargetParameters.BatchJobParameters, b.ko.Spec.TargetParameters.BatchJobParameters) + } else if a.ko.Spec.TargetParameters.BatchJobParameters != nil && b.ko.Spec.TargetParameters.BatchJobParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties, b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ArrayProperties", a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties, b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties != nil && b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties != nil { + if 
hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size, b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size", a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size, b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size != nil && b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size != nil { + if *a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size != *b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size { + delta.Add("Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size", a.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size, b.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides != nil && b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides != nil { + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command) + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment, 
b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment) + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != nil && b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != nil { + if *a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != *b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType) + } + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements) { + delta.Add("Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements", a.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements, b.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements) + } + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.BatchJobParameters.DependsOn, b.ko.Spec.TargetParameters.BatchJobParameters.DependsOn) { + 
delta.Add("Spec.TargetParameters.BatchJobParameters.DependsOn", a.ko.Spec.TargetParameters.BatchJobParameters.DependsOn, b.ko.Spec.TargetParameters.BatchJobParameters.DependsOn) + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition, b.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition) { + delta.Add("Spec.TargetParameters.BatchJobParameters.JobDefinition", a.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition, b.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition != nil && b.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition != nil { + if *a.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition != *b.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition { + delta.Add("Spec.TargetParameters.BatchJobParameters.JobDefinition", a.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition, b.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.JobName, b.ko.Spec.TargetParameters.BatchJobParameters.JobName) { + delta.Add("Spec.TargetParameters.BatchJobParameters.JobName", a.ko.Spec.TargetParameters.BatchJobParameters.JobName, b.ko.Spec.TargetParameters.BatchJobParameters.JobName) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.JobName != nil && b.ko.Spec.TargetParameters.BatchJobParameters.JobName != nil { + if *a.ko.Spec.TargetParameters.BatchJobParameters.JobName != *b.ko.Spec.TargetParameters.BatchJobParameters.JobName { + delta.Add("Spec.TargetParameters.BatchJobParameters.JobName", a.ko.Spec.TargetParameters.BatchJobParameters.JobName, b.ko.Spec.TargetParameters.BatchJobParameters.JobName) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.Parameters, b.ko.Spec.TargetParameters.BatchJobParameters.Parameters) { + delta.Add("Spec.TargetParameters.BatchJobParameters.Parameters", 
a.ko.Spec.TargetParameters.BatchJobParameters.Parameters, b.ko.Spec.TargetParameters.BatchJobParameters.Parameters) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.Parameters != nil && b.ko.Spec.TargetParameters.BatchJobParameters.Parameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.TargetParameters.BatchJobParameters.Parameters, b.ko.Spec.TargetParameters.BatchJobParameters.Parameters) { + delta.Add("Spec.TargetParameters.BatchJobParameters.Parameters", a.ko.Spec.TargetParameters.BatchJobParameters.Parameters, b.ko.Spec.TargetParameters.BatchJobParameters.Parameters) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy, b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy) { + delta.Add("Spec.TargetParameters.BatchJobParameters.RetryStrategy", a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy, b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy != nil && b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts, b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts) { + delta.Add("Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts", a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts, b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts) + } else if a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != nil && b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != nil { + if *a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != *b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts { + delta.Add("Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts", a.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts, 
b.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts) + } + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.CloudWatchLogsParameters, b.ko.Spec.TargetParameters.CloudWatchLogsParameters) { + delta.Add("Spec.TargetParameters.CloudWatchLogsParameters", a.ko.Spec.TargetParameters.CloudWatchLogsParameters, b.ko.Spec.TargetParameters.CloudWatchLogsParameters) + } else if a.ko.Spec.TargetParameters.CloudWatchLogsParameters != nil && b.ko.Spec.TargetParameters.CloudWatchLogsParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName) { + delta.Add("Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName", a.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName) + } else if a.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName != nil && b.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName != nil { + if *a.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName != *b.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName { + delta.Add("Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName", a.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp) { + delta.Add("Spec.TargetParameters.CloudWatchLogsParameters.Timestamp", a.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp) + } else if a.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp != nil && b.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp != nil { + if *a.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp != 
*b.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp { + delta.Add("Spec.TargetParameters.CloudWatchLogsParameters.Timestamp", a.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp, b.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters, b.ko.Spec.TargetParameters.ECSTaskParameters) { + delta.Add("Spec.TargetParameters.ECSTaskParameters", a.ko.Spec.TargetParameters.ECSTaskParameters, b.ko.Spec.TargetParameters.ECSTaskParameters) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters != nil && b.ko.Spec.TargetParameters.ECSTaskParameters != nil { + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy, b.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy", a.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy, b.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy) + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags", a.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags != *b.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags { + delta.Add("Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags", a.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags) + } + } + if 
hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand", a.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand != *b.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand { + delta.Add("Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand", a.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand, b.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Group, b.ko.Spec.TargetParameters.ECSTaskParameters.Group) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Group", a.ko.Spec.TargetParameters.ECSTaskParameters.Group, b.ko.Spec.TargetParameters.ECSTaskParameters.Group) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Group != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Group != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Group != *b.ko.Spec.TargetParameters.ECSTaskParameters.Group { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Group", a.ko.Spec.TargetParameters.ECSTaskParameters.Group, b.ko.Spec.TargetParameters.ECSTaskParameters.Group) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType, b.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.LaunchType", a.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType, b.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType != nil && 
b.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType != *b.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType { + delta.Add("Spec.TargetParameters.ECSTaskParameters.LaunchType", a.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType, b.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration", a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration", a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP", 
a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP != *b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP", a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP) + } + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups", a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups) + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets, b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets", a.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets, 
b.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides != nil { + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides) + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.CPU", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU != *b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.CPU", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage) { + 
delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB != *b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN != nil && 
b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN != *b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN) + } + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides) + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.Memory", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory != *b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.Memory", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN) { + 
delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN != *b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN", a.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN, b.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN) + } + } + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints, b.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PlacementConstraints", a.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints, b.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints) + } + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy, b.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PlacementStrategy", a.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy, b.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy) + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion, b.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PlatformVersion", a.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion, b.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion != nil { + if 
*a.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion != *b.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PlatformVersion", a.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion, b.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags, b.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PropagateTags", a.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags, b.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags != *b.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags { + delta.Add("Spec.TargetParameters.ECSTaskParameters.PropagateTags", a.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags, b.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID, b.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.ReferenceID", a.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID, b.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID != *b.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID { + delta.Add("Spec.TargetParameters.ECSTaskParameters.ReferenceID", a.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID, b.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID) + } + } + if 
!reflect.DeepEqual(a.ko.Spec.TargetParameters.ECSTaskParameters.Tags, b.ko.Spec.TargetParameters.ECSTaskParameters.Tags) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.Tags", a.ko.Spec.TargetParameters.ECSTaskParameters.Tags, b.ko.Spec.TargetParameters.ECSTaskParameters.Tags) + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.TaskCount", a.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount != *b.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount { + delta.Add("Spec.TargetParameters.ECSTaskParameters.TaskCount", a.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN) { + delta.Add("Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN", a.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN) + } else if a.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN != nil && b.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN != nil { + if *a.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN != *b.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN { + delta.Add("Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN", a.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN, b.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters, 
b.ko.Spec.TargetParameters.EventBridgeEventBusParameters) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters) + } else if a.ko.Spec.TargetParameters.EventBridgeEventBusParameters != nil && b.ko.Spec.TargetParameters.EventBridgeEventBusParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.DetailType", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType) + } else if a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType != nil && b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType != nil { + if *a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType != *b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.DetailType", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID) + } else if a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID != nil && b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID != nil { + if *a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID != *b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID { + 
delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID) + } + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.Resources", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources) + } + if hasNilDifference(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.Source", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source) + } else if a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source != nil && b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source != nil { + if *a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source != *b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.Source", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time) { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.Time", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time) + } else if a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time != nil && b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time != nil { + if 
*a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time != *b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time { + delta.Add("Spec.TargetParameters.EventBridgeEventBusParameters.Time", a.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time, b.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.HTTPParameters, b.ko.Spec.TargetParameters.HTTPParameters) { + delta.Add("Spec.TargetParameters.HTTPParameters", a.ko.Spec.TargetParameters.HTTPParameters, b.ko.Spec.TargetParameters.HTTPParameters) + } else if a.ko.Spec.TargetParameters.HTTPParameters != nil && b.ko.Spec.TargetParameters.HTTPParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters, b.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters) { + delta.Add("Spec.TargetParameters.HTTPParameters.HeaderParameters", a.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters, b.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters) + } else if a.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters != nil && b.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters, b.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters) { + delta.Add("Spec.TargetParameters.HTTPParameters.HeaderParameters", a.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters, b.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters) + } + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues, b.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues) { + delta.Add("Spec.TargetParameters.HTTPParameters.PathParameterValues", a.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues, b.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues) + } + if hasNilDifference(a.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters, 
b.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters) { + delta.Add("Spec.TargetParameters.HTTPParameters.QueryStringParameters", a.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters) + } else if a.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters != nil && b.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters) { + delta.Add("Spec.TargetParameters.HTTPParameters.QueryStringParameters", a.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.InputTemplate, b.ko.Spec.TargetParameters.InputTemplate) { + delta.Add("Spec.TargetParameters.InputTemplate", a.ko.Spec.TargetParameters.InputTemplate, b.ko.Spec.TargetParameters.InputTemplate) + } else if a.ko.Spec.TargetParameters.InputTemplate != nil && b.ko.Spec.TargetParameters.InputTemplate != nil { + if *a.ko.Spec.TargetParameters.InputTemplate != *b.ko.Spec.TargetParameters.InputTemplate { + delta.Add("Spec.TargetParameters.InputTemplate", a.ko.Spec.TargetParameters.InputTemplate, b.ko.Spec.TargetParameters.InputTemplate) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.KinesisStreamParameters, b.ko.Spec.TargetParameters.KinesisStreamParameters) { + delta.Add("Spec.TargetParameters.KinesisStreamParameters", a.ko.Spec.TargetParameters.KinesisStreamParameters, b.ko.Spec.TargetParameters.KinesisStreamParameters) + } else if a.ko.Spec.TargetParameters.KinesisStreamParameters != nil && b.ko.Spec.TargetParameters.KinesisStreamParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey, b.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey) { + 
delta.Add("Spec.TargetParameters.KinesisStreamParameters.PartitionKey", a.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey, b.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey) + } else if a.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey != nil && b.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey != nil { + if *a.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey != *b.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey { + delta.Add("Spec.TargetParameters.KinesisStreamParameters.PartitionKey", a.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey, b.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.LambdaFunctionParameters, b.ko.Spec.TargetParameters.LambdaFunctionParameters) { + delta.Add("Spec.TargetParameters.LambdaFunctionParameters", a.ko.Spec.TargetParameters.LambdaFunctionParameters, b.ko.Spec.TargetParameters.LambdaFunctionParameters) + } else if a.ko.Spec.TargetParameters.LambdaFunctionParameters != nil && b.ko.Spec.TargetParameters.LambdaFunctionParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType, b.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType) { + delta.Add("Spec.TargetParameters.LambdaFunctionParameters.InvocationType", a.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType, b.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType) + } else if a.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType != nil && b.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType != nil { + if *a.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType != *b.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType { + delta.Add("Spec.TargetParameters.LambdaFunctionParameters.InvocationType", a.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType, 
b.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters, b.ko.Spec.TargetParameters.RedshiftDataParameters) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters", a.ko.Spec.TargetParameters.RedshiftDataParameters, b.ko.Spec.TargetParameters.RedshiftDataParameters) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters.Database, b.ko.Spec.TargetParameters.RedshiftDataParameters.Database) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.Database", a.ko.Spec.TargetParameters.RedshiftDataParameters.Database, b.ko.Spec.TargetParameters.RedshiftDataParameters.Database) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters.Database != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters.Database != nil { + if *a.ko.Spec.TargetParameters.RedshiftDataParameters.Database != *b.ko.Spec.TargetParameters.RedshiftDataParameters.Database { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.Database", a.ko.Spec.TargetParameters.RedshiftDataParameters.Database, b.ko.Spec.TargetParameters.RedshiftDataParameters.Database) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser, b.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.DBUser", a.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser, b.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser != nil { + if *a.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser != *b.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.DBUser", 
a.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser, b.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN, b.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN", a.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN, b.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN != nil { + if *a.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN != *b.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN", a.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN, b.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN) + } + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs, b.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.SQLs", a.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs, b.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs) + } + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName, b.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.StatementName", a.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName, b.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName != nil { + if *a.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName != 
*b.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.StatementName", a.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName, b.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent, b.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent) { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.WithEvent", a.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent, b.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent) + } else if a.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent != nil && b.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent != nil { + if *a.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent != *b.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent { + delta.Add("Spec.TargetParameters.RedshiftDataParameters.WithEvent", a.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent, b.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.SageMakerPipelineParameters, b.ko.Spec.TargetParameters.SageMakerPipelineParameters) { + delta.Add("Spec.TargetParameters.SageMakerPipelineParameters", a.ko.Spec.TargetParameters.SageMakerPipelineParameters, b.ko.Spec.TargetParameters.SageMakerPipelineParameters) + } else if a.ko.Spec.TargetParameters.SageMakerPipelineParameters != nil && b.ko.Spec.TargetParameters.SageMakerPipelineParameters != nil { + if !reflect.DeepEqual(a.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList, b.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList) { + delta.Add("Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList", a.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList, b.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList) + } + } + if 
hasNilDifference(a.ko.Spec.TargetParameters.SQSQueueParameters, b.ko.Spec.TargetParameters.SQSQueueParameters) { + delta.Add("Spec.TargetParameters.SQSQueueParameters", a.ko.Spec.TargetParameters.SQSQueueParameters, b.ko.Spec.TargetParameters.SQSQueueParameters) + } else if a.ko.Spec.TargetParameters.SQSQueueParameters != nil && b.ko.Spec.TargetParameters.SQSQueueParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID) { + delta.Add("Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID", a.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID) + } else if a.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID != nil && b.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID != nil { + if *a.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID != *b.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID { + delta.Add("Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID", a.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID) + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID) { + delta.Add("Spec.TargetParameters.SQSQueueParameters.MessageGroupID", a.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID) + } else if a.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID != nil && b.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID != nil { + if *a.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID != *b.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID { + 
delta.Add("Spec.TargetParameters.SQSQueueParameters.MessageGroupID", a.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID, b.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID) + } + } + } + if hasNilDifference(a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters, b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters) { + delta.Add("Spec.TargetParameters.StepFunctionStateMachineParameters", a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters, b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters) + } else if a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters != nil && b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters != nil { + if hasNilDifference(a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType, b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType) { + delta.Add("Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType", a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType, b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType) + } else if a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType != nil && b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType != nil { + if *a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType != *b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType { + delta.Add("Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType", a.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType, b.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType) + } + } + } + } + + if hasNilDifference(a.ko.Spec.EnrichmentParameters, b.ko.Spec.EnrichmentParameters) { + delta.Add("Spec.EnrichmentParameters", a.ko.Spec.EnrichmentParameters, b.ko.Spec.EnrichmentParameters) + } else if a.ko.Spec.EnrichmentParameters != nil && 
b.ko.Spec.EnrichmentParameters != nil { + if hasNilDifference(a.ko.Spec.EnrichmentParameters.HTTPParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters", a.ko.Spec.EnrichmentParameters.HTTPParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters) + } else if a.ko.Spec.EnrichmentParameters.HTTPParameters != nil && b.ko.Spec.EnrichmentParameters.HTTPParameters != nil { + if hasNilDifference(a.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters.HeaderParameters", a.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters) + } else if a.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters != nil && b.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters.HeaderParameters", a.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters) + } + } + if !ackcompare.SliceStringPEqual(a.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues, b.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters.PathParameterValues", a.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues, b.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues) + } + if hasNilDifference(a.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters", 
a.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters) + } else if a.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters != nil && b.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters) { + delta.Add("Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters", a.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters, b.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters) + } + } + } + if hasNilDifference(a.ko.Spec.EnrichmentParameters.InputTemplate, b.ko.Spec.EnrichmentParameters.InputTemplate) { + delta.Add("Spec.EnrichmentParameters.InputTemplate", a.ko.Spec.EnrichmentParameters.InputTemplate, b.ko.Spec.EnrichmentParameters.InputTemplate) + } else if a.ko.Spec.EnrichmentParameters.InputTemplate != nil && b.ko.Spec.EnrichmentParameters.InputTemplate != nil { + if *a.ko.Spec.EnrichmentParameters.InputTemplate != *b.ko.Spec.EnrichmentParameters.InputTemplate { + delta.Add("Spec.EnrichmentParameters.InputTemplate", a.ko.Spec.EnrichmentParameters.InputTemplate, b.ko.Spec.EnrichmentParameters.InputTemplate) + } + } + } +} + +// pipeAvailable returns true if the supplied Pipe is in a running status +func pipeAvailable(r *resource) bool { + if r.ko.Status.CurrentState == nil { + return false + } + state := *r.ko.Status.CurrentState + return state == svcsdk.PipeStateRunning +} + +// pipeInMutatingState returns true if the supplied Pipe is in the process of +// being modified +func pipeInMutatingState(r *resource) bool { + if r.ko.Status.CurrentState == nil { + return false + } + state := *r.ko.Status.CurrentState + + mutatingStates := []string{ + svcsdk.PipeStateCreating, + svcsdk.PipeStateStarting, + svcsdk.PipeStateStopping, + 
svcsdk.PipeStateUpdating, + svcsdk.PipeStateDeleting, + } + + for _, s := range mutatingStates { + if state == s { + return true + } + } + return false +} + +// if an optional desired field value is nil explicitly unset it in the request +// input +func unsetRemovedSpecFields( + delta *ackcompare.Delta, + spec svcapitypes.PipeSpec, + input *svcsdk.UpdatePipeInput, +) { + if delta.DifferentAt("Spec.Description") { + if spec.Description == nil { + input.SetDescription("") + } + } + + if delta.DifferentAt("Spec.DesiredState") { + if spec.DesiredState == nil { + input.SetDesiredState("") + } + } +} diff --git a/pkg/resource/pipe/hooks_tags.go b/pkg/resource/pipe/hooks_tags.go new file mode 100644 index 0000000..c71f1d3 --- /dev/null +++ b/pkg/resource/pipe/hooks_tags.go @@ -0,0 +1,91 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package pipe + +import ( + "context" + + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + svcsdk "github.com/aws/aws-sdk-go/service/pipes" +) + +// updatePipeTags uses TagResource and UntagResource to add, remove and update +// a pipe tags. 
+func (rm *resourceManager) updatePipeTags( + ctx context.Context, + latest *resource, + desired *resource, +) error { + var err error + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.updatePipeTags") + defer exit(err) + + addedOrUpdated, removed := compareMaps(latest.ko.Spec.Tags, desired.ko.Spec.Tags) + + if len(removed) > 0 { + input := &svcsdk.UntagResourceInput{ + ResourceArn: (*string)(desired.ko.Status.ACKResourceMetadata.ARN), + TagKeys: removed, + } + _, err = rm.sdkapi.UntagResourceWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "UntagResource", err) + if err != nil { + return err + } + } + + if len(addedOrUpdated) > 0 { + input := &svcsdk.TagResourceInput{ + ResourceArn: (*string)(desired.ko.Status.ACKResourceMetadata.ARN), + Tags: addedOrUpdated, + } + _, err = rm.sdkapi.TagResourceWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "TagResource", err) + if err != nil { + return err + } + } + + return nil +} + +// compareMaps compares two string to string maps and returns three outputs: a +// map of the new key/values observed, a list of the keys of the removed values +// and a map containing the updated keys and their new values. +func compareMaps( + a map[string]*string, + b map[string]*string, +) (addedOrUpdated map[string]*string, removed []*string) { + addedOrUpdated = make(map[string]*string) + visited := make(map[string]bool, len(a)) + for keyA, valueA := range a { + valueB, found := b[keyA] + if !found { + removed = append(removed, &keyA) + continue + } + if *valueA != *valueB { + addedOrUpdated[keyA] = valueB + } + visited[keyA] = true + } + for keyB, valueB := range b { + _, found := a[keyB] + if !found { + addedOrUpdated[keyB] = valueB + } + } + return +} diff --git a/pkg/resource/pipe/identifiers.go b/pkg/resource/pipe/identifiers.go new file mode 100644 index 0000000..ddd1da3 --- /dev/null +++ b/pkg/resource/pipe/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. +func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/pipe/manager.go b/pkg/resource/pipe/manager.go new file mode 100644 index 0000000..64471b6 --- /dev/null +++ b/pkg/resource/pipe/manager.go @@ -0,0 +1,360 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go/aws/session" + svcsdk "github.com/aws/aws-sdk-go/service/pipes" + svcsdkapi "github.com/aws/aws-sdk-go/service/pipes/pipesiface" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.Pipe{} +) + +// +kubebuilder:rbac:groups=pipes.services.k8s.aws,resources=pipes,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=pipes.services.k8s.aws,resources=pipes/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for Book custom resources. +type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // The AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sess is the AWS SDK Session object used to communicate with the backend + // AWS service API + sess *session.Session + // sdk is a pointer to the AWS service API interface exposed by the + // aws-sdk-go/services/{alias}/{alias}iface package. + sdkapi svcsdkapi.PipesAPI +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. 
+func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note for specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. 
The +// higher-level reonciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. 
// ARNFromName returns an AWS Resource Name from a given string name. This
// is useful for constructing ARNs for APIs that require ARNs in their
// GetAttributes operations but all we have (for new CRs at least) is a
// name for the resource
func (rm *resourceManager) ARNFromName(name string) string {
	return fmt.Sprintf(
		"arn:aws:pipes:%s:%s:%s",
		rm.awsRegion,
		rm.awsAccountID,
		name,
	)
}

// LateInitialize returns an acktypes.AWSResource after setting the late initialized
// fields from the readOne call. This method will initialize the optional fields
// which were not provided by the k8s user but were defaulted by the AWS service.
// If there are no such fields to be initialized, the returned object is similar to
// object passed in the parameter.
func (rm *resourceManager) LateInitialize(
	ctx context.Context,
	latest acktypes.AWSResource,
) (acktypes.AWSResource, error) {
	rlog := ackrtlog.FromContext(ctx)
	// If there are no fields to late initialize, do nothing
	if len(lateInitializeFieldNames) == 0 {
		rlog.Debug("no late initialization required.")
		return latest, nil
	}
	// Work on a deep copy so the caller's object is never mutated on failure.
	latestCopy := latest.DeepCopy()
	lateInitConditionReason := ""
	lateInitConditionMessage := ""
	observed, err := rm.ReadOne(ctx, latestCopy)
	if err != nil {
		lateInitConditionMessage = "Unable to complete Read operation required for late initialization"
		lateInitConditionReason = "Late Initialization Failure"
		ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason)
		ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil)
		return latestCopy, err
	}
	lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy)
	incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes)
	if incompleteInitialization {
		// Add the condition with LateInitialized=False
		lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds"
		lateInitConditionReason = "Delayed Late Initialization"
		ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason)
		ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil)
		// NOTE(review): a nil error is wrapped by NeededAfter here — confirm
		// the runtime's requeue handling tolerates a nil underlying error.
		return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second)
	}
	// Set LateInitialized condition to True
	lateInitConditionMessage = "Late initialization successful"
	lateInitConditionReason = "Late initialization successful"
	ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason)
	return lateInitializedRes, nil
}

// incompleteLateInitialization return true if there are fields which were supposed to be
// late initialized but are not. If all the fields are late initialized, false is returned.
// Always false here: Pipe declares no late-initializable fields.
func (rm *resourceManager) incompleteLateInitialization(
	res acktypes.AWSResource,
) bool {
	return false
}

// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed'
// resource and returns 'latest' resource. No-op for Pipe (no fields to copy over).
func (rm *resourceManager) lateInitializeFromReadOneOutput(
	observed acktypes.AWSResource,
	latest acktypes.AWSResource,
) acktypes.AWSResource {
	return latest
}

// IsSynced returns true if the resource is synced. This generated default
// always reports synced once the CR object is present.
func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) {
	r := rm.concreteResource(res)
	if r.ko == nil {
		// Should never happen... if it does, it's buggy code.
		panic("resource manager's IsSynced() method received resource with nil CR object")
	}

	return true, nil
}

// EnsureTags ensures that tags are present inside the AWSResource.
// If the AWSResource does not have any existing resource tags, the 'tags'
// field is initialized and the controller tags are added.
// If the AWSResource has existing resource tags, then controller tags are
// added to the existing resource tags without overriding them.
+// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. +func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's EnsureTags method received resource with nil CR object") + } + defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) + var existingTags map[string]*string + existingTags = r.ko.Spec.Tags + resourceTags := ToACKTags(existingTags) + tags := acktags.Merge(resourceTags, defaultTags) + r.ko.Spec.Tags = FromACKTags(tags) + return nil +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +func newResourceManager( + cfg ackcfg.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + sess *session.Session, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sess: sess, + sdkapi: svcsdk.New(sess), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. +func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. 
+func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/pipe/manager_factory.go b/pkg/resource/pipe/manager_factory.go new file mode 100644 index 0000000..ad3087d --- /dev/null +++ b/pkg/resource/pipe/manager_factory.go @@ -0,0 +1,96 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/pipes-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. 
+type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + sess *session.Session, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (acktypes.AWSResourceManager, error) { + rmId := fmt.Sprintf("%s/%s", id, region) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. +func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/pipe/references.go b/pkg/resource/pipe/references.go new file mode 100644 index 0000000..2fdcc32 --- /dev/null +++ b/pkg/resource/pipe/references.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
//     http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

// Code generated by ack-generate. DO NOT EDIT.

package pipe

import (
	"context"
	"sigs.k8s.io/controller-runtime/pkg/client"

	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"

	svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1"
)

// ResolveReferences finds if there are any Reference field(s) present
// inside AWSResource passed in the parameter and attempts to resolve
// those reference field(s) into target field(s).
// It returns an AWSResource with resolved reference(s), and an error if the
// passed AWSResource's reference field(s) cannot be resolved.
// This method also adds/updates the ConditionTypeReferencesResolved for the
// AWSResource.
// The Pipe resource declares no reference fields, so this generated stub
// returns the resource unchanged.
func (rm *resourceManager) ResolveReferences(
	ctx context.Context,
	apiReader client.Reader,
	res acktypes.AWSResource,
) (acktypes.AWSResource, error) {
	return res, nil
}

// validateReferenceFields validates the reference field and corresponding
// identifier field. No-op here: Pipe has no reference fields to validate.
func validateReferenceFields(ko *svcapitypes.Pipe) error {
	return nil
}

// hasNonNilReferences returns true if resource contains a reference to another
// resource. Always false here because Pipe declares no reference fields.
func hasNonNilReferences(ko *svcapitypes.Pipe) bool {
	return false
}
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... 
var (
	// Hack to avoid import errors during build...
	_ = &ackerrors.MissingNameIdentifier
)

// resource implements the `aws-controllers-k8s/runtime/pkg/types.AWSResource`
// interface
type resource struct {
	// ko is the Kubernetes-native CR representing the resource
	ko *svcapitypes.Pipe
}

// Identifiers returns an AWSResourceIdentifiers object containing various
// identifying information, including the AWS account ID that owns the
// resource, the resource's AWS Resource Name (ARN)
func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers {
	return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata}
}

// IsBeingDeleted returns true if the Kubernetes resource has a non-zero
// deletion timestamp
func (r *resource) IsBeingDeleted() bool {
	return !r.ko.DeletionTimestamp.IsZero()
}

// RuntimeObject returns the Kubernetes apimachinery/runtime representation of
// the AWSResource
func (r *resource) RuntimeObject() rtclient.Object {
	return r.ko
}

// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object
// representation of the AWSResource
func (r *resource) MetaObject() metav1.Object {
	return r.ko.GetObjectMeta()
}

// Conditions returns the ACK Conditions collection for the AWSResource
func (r *resource) Conditions() []*ackv1alpha1.Condition {
	return r.ko.Status.Conditions
}

// ReplaceConditions sets the Conditions status field for the resource
func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) {
	r.ko.Status.Conditions = conditions
}

// SetObjectMeta sets the ObjectMeta field for the resource
func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) {
	r.ko.ObjectMeta = meta
}

// SetStatus will set the Status field for the resource
func (r *resource) SetStatus(desired acktypes.AWSResource) {
	r.ko.Status = desired.(*resource).ko.Status
}

// SetIdentifiers sets the Spec or Status field that is referenced as the unique
// resource identifier
func (r *resource) SetIdentifiers(identifier
*ackv1alpha1.AWSIdentifiers) error { + if identifier.NameOrID == "" { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.Name = &identifier.NameOrID + + return nil +} + +// DeepCopy will return a copy of the resource +func (r *resource) DeepCopy() acktypes.AWSResource { + koCopy := r.ko.DeepCopy() + return &resource{koCopy} +} diff --git a/pkg/resource/pipe/sdk.go b/pkg/resource/pipe/sdk.go new file mode 100644 index 0000000..f23a8f8 --- /dev/null +++ b/pkg/resource/pipe/sdk.go @@ -0,0 +1,2776 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go/aws" + svcsdk "github.com/aws/aws-sdk-go/service/pipes" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... 
+var ( + _ = &metav1.Time{} + _ = strings.ToLower("") + _ = &aws.JSONValue{} + _ = &svcsdk.Pipes{} + _ = &svcapitypes.Pipe{} + _ = ackv1alpha1.AWSAccountID("") + _ = &ackerr.NotFound + _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} + _ = fmt.Sprintf("") + _ = &ackrequeue.NoRequeue{} +) + +// sdkFind returns SDK-specific information about a supplied resource +func (rm *resourceManager) sdkFind( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkFind") + defer func() { + exit(err) + }() + // If any required fields in the input shape are missing, AWS resource is + // not created yet. Return NotFound here to indicate to callers that the + // resource isn't yet created. + if rm.requiredFieldsMissingFromReadOneInput(r) { + return nil, ackerr.NotFound + } + + input, err := rm.newDescribeRequestPayload(r) + if err != nil { + return nil, err + } + + var resp *svcsdk.DescribePipeOutput + resp, err = rm.sdkapi.DescribePipeWithContext(ctx, input) + rm.metrics.RecordAPICall("READ_ONE", "DescribePipe", err) + if err != nil { + if reqErr, ok := ackerr.AWSRequestFailure(err); ok && reqErr.StatusCode() == 404 { + return nil, ackerr.NotFound + } + if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "NotFoundException" { + return nil, ackerr.NotFound + } + return nil, err + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := r.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CreationTime != nil { + ko.Status.CreationTime = &metav1.Time{*resp.CreationTime} + } else { + ko.Status.CreationTime = nil + } + if resp.CurrentState != nil { + ko.Status.CurrentState = resp.CurrentState 
+ } else { + ko.Status.CurrentState = nil + } + if resp.Description != nil { + ko.Spec.Description = resp.Description + } else { + ko.Spec.Description = nil + } + if resp.DesiredState != nil { + ko.Spec.DesiredState = resp.DesiredState + } else { + ko.Spec.DesiredState = nil + } + if resp.Enrichment != nil { + ko.Spec.Enrichment = resp.Enrichment + } else { + ko.Spec.Enrichment = nil + } + if resp.EnrichmentParameters != nil { + f6 := &svcapitypes.PipeEnrichmentParameters{} + if resp.EnrichmentParameters.HttpParameters != nil { + f6f0 := &svcapitypes.PipeEnrichmentHTTPParameters{} + if resp.EnrichmentParameters.HttpParameters.HeaderParameters != nil { + f6f0f0 := map[string]*string{} + for f6f0f0key, f6f0f0valiter := range resp.EnrichmentParameters.HttpParameters.HeaderParameters { + var f6f0f0val string + f6f0f0val = *f6f0f0valiter + f6f0f0[f6f0f0key] = &f6f0f0val + } + f6f0.HeaderParameters = f6f0f0 + } + if resp.EnrichmentParameters.HttpParameters.PathParameterValues != nil { + f6f0f1 := []*string{} + for _, f6f0f1iter := range resp.EnrichmentParameters.HttpParameters.PathParameterValues { + var f6f0f1elem string + f6f0f1elem = *f6f0f1iter + f6f0f1 = append(f6f0f1, &f6f0f1elem) + } + f6f0.PathParameterValues = f6f0f1 + } + if resp.EnrichmentParameters.HttpParameters.QueryStringParameters != nil { + f6f0f2 := map[string]*string{} + for f6f0f2key, f6f0f2valiter := range resp.EnrichmentParameters.HttpParameters.QueryStringParameters { + var f6f0f2val string + f6f0f2val = *f6f0f2valiter + f6f0f2[f6f0f2key] = &f6f0f2val + } + f6f0.QueryStringParameters = f6f0f2 + } + f6.HTTPParameters = f6f0 + } + if resp.EnrichmentParameters.InputTemplate != nil { + f6.InputTemplate = resp.EnrichmentParameters.InputTemplate + } + ko.Spec.EnrichmentParameters = f6 + } else { + ko.Spec.EnrichmentParameters = nil + } + if resp.LastModifiedTime != nil { + ko.Status.LastModifiedTime = &metav1.Time{*resp.LastModifiedTime} + } else { + ko.Status.LastModifiedTime = nil + } + if resp.Name != 
nil { + ko.Spec.Name = resp.Name + } else { + ko.Spec.Name = nil + } + if resp.RoleArn != nil { + ko.Spec.RoleARN = resp.RoleArn + } else { + ko.Spec.RoleARN = nil + } + if resp.Source != nil { + ko.Spec.Source = resp.Source + } else { + ko.Spec.Source = nil + } + if resp.SourceParameters != nil { + f11 := &svcapitypes.PipeSourceParameters{} + if resp.SourceParameters.ActiveMQBrokerParameters != nil { + f11f0 := &svcapitypes.PipeSourceActiveMQBrokerParameters{} + if resp.SourceParameters.ActiveMQBrokerParameters.BatchSize != nil { + f11f0.BatchSize = resp.SourceParameters.ActiveMQBrokerParameters.BatchSize + } + if resp.SourceParameters.ActiveMQBrokerParameters.Credentials != nil { + f11f0f1 := &svcapitypes.MQBrokerAccessCredentials{} + if resp.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != nil { + f11f0f1.BasicAuth = resp.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth + } + f11f0.Credentials = f11f0f1 + } + if resp.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f11f0.MaximumBatchingWindowInSeconds = resp.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.ActiveMQBrokerParameters.QueueName != nil { + f11f0.QueueName = resp.SourceParameters.ActiveMQBrokerParameters.QueueName + } + f11.ActiveMQBrokerParameters = f11f0 + } + if resp.SourceParameters.DynamoDBStreamParameters != nil { + f11f1 := &svcapitypes.PipeSourceDynamoDBStreamParameters{} + if resp.SourceParameters.DynamoDBStreamParameters.BatchSize != nil { + f11f1.BatchSize = resp.SourceParameters.DynamoDBStreamParameters.BatchSize + } + if resp.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig != nil { + f11f1f1 := &svcapitypes.DeadLetterConfig{} + if resp.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.Arn != nil { + f11f1f1.ARN = resp.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.Arn + } + f11f1.DeadLetterConfig = f11f1f1 + } + if 
resp.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != nil { + f11f1.MaximumBatchingWindowInSeconds = resp.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != nil { + f11f1.MaximumRecordAgeInSeconds = resp.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds + } + if resp.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != nil { + f11f1.MaximumRetryAttempts = resp.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts + } + if resp.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != nil { + f11f1.OnPartialBatchItemFailure = resp.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure + } + if resp.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != nil { + f11f1.ParallelizationFactor = resp.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor + } + if resp.SourceParameters.DynamoDBStreamParameters.StartingPosition != nil { + f11f1.StartingPosition = resp.SourceParameters.DynamoDBStreamParameters.StartingPosition + } + f11.DynamoDBStreamParameters = f11f1 + } + if resp.SourceParameters.FilterCriteria != nil { + f11f2 := &svcapitypes.FilterCriteria{} + if resp.SourceParameters.FilterCriteria.Filters != nil { + f11f2f0 := []*svcapitypes.Filter{} + for _, f11f2f0iter := range resp.SourceParameters.FilterCriteria.Filters { + f11f2f0elem := &svcapitypes.Filter{} + if f11f2f0iter.Pattern != nil { + f11f2f0elem.Pattern = f11f2f0iter.Pattern + } + f11f2f0 = append(f11f2f0, f11f2f0elem) + } + f11f2.Filters = f11f2f0 + } + f11.FilterCriteria = f11f2 + } + if resp.SourceParameters.KinesisStreamParameters != nil { + f11f3 := &svcapitypes.PipeSourceKinesisStreamParameters{} + if resp.SourceParameters.KinesisStreamParameters.BatchSize != nil { + f11f3.BatchSize = resp.SourceParameters.KinesisStreamParameters.BatchSize + } + if 
resp.SourceParameters.KinesisStreamParameters.DeadLetterConfig != nil { + f11f3f1 := &svcapitypes.DeadLetterConfig{} + if resp.SourceParameters.KinesisStreamParameters.DeadLetterConfig.Arn != nil { + f11f3f1.ARN = resp.SourceParameters.KinesisStreamParameters.DeadLetterConfig.Arn + } + f11f3.DeadLetterConfig = f11f3f1 + } + if resp.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != nil { + f11f3.MaximumBatchingWindowInSeconds = resp.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != nil { + f11f3.MaximumRecordAgeInSeconds = resp.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds + } + if resp.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != nil { + f11f3.MaximumRetryAttempts = resp.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts + } + if resp.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != nil { + f11f3.OnPartialBatchItemFailure = resp.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure + } + if resp.SourceParameters.KinesisStreamParameters.ParallelizationFactor != nil { + f11f3.ParallelizationFactor = resp.SourceParameters.KinesisStreamParameters.ParallelizationFactor + } + if resp.SourceParameters.KinesisStreamParameters.StartingPosition != nil { + f11f3.StartingPosition = resp.SourceParameters.KinesisStreamParameters.StartingPosition + } + if resp.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp != nil { + f11f3.StartingPositionTimestamp = &metav1.Time{*resp.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp} + } + f11.KinesisStreamParameters = f11f3 + } + if resp.SourceParameters.ManagedStreamingKafkaParameters != nil { + f11f4 := &svcapitypes.PipeSourceManagedStreamingKafkaParameters{} + if resp.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != nil { + f11f4.BatchSize = 
resp.SourceParameters.ManagedStreamingKafkaParameters.BatchSize + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID != nil { + f11f4.ConsumerGroupID = resp.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.Credentials != nil { + f11f4f2 := &svcapitypes.MSKAccessCredentials{} + if resp.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTlsAuth != nil { + f11f4f2.ClientCertificateTLSAuth = resp.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTlsAuth + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SaslScram512Auth != nil { + f11f4f2.SASLSCRAM512Auth = resp.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SaslScram512Auth + } + f11f4.Credentials = f11f4f2 + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f11f4.MaximumBatchingWindowInSeconds = resp.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition != nil { + f11f4.StartingPosition = resp.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition + } + if resp.SourceParameters.ManagedStreamingKafkaParameters.TopicName != nil { + f11f4.TopicName = resp.SourceParameters.ManagedStreamingKafkaParameters.TopicName + } + f11.ManagedStreamingKafkaParameters = f11f4 + } + if resp.SourceParameters.RabbitMQBrokerParameters != nil { + f11f5 := &svcapitypes.PipeSourceRabbitMQBrokerParameters{} + if resp.SourceParameters.RabbitMQBrokerParameters.BatchSize != nil { + f11f5.BatchSize = resp.SourceParameters.RabbitMQBrokerParameters.BatchSize + } + if resp.SourceParameters.RabbitMQBrokerParameters.Credentials != nil { + f11f5f1 := &svcapitypes.MQBrokerAccessCredentials{} + if resp.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != nil { + 
f11f5f1.BasicAuth = resp.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth + } + f11f5.Credentials = f11f5f1 + } + if resp.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f11f5.MaximumBatchingWindowInSeconds = resp.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.RabbitMQBrokerParameters.QueueName != nil { + f11f5.QueueName = resp.SourceParameters.RabbitMQBrokerParameters.QueueName + } + if resp.SourceParameters.RabbitMQBrokerParameters.VirtualHost != nil { + f11f5.VirtualHost = resp.SourceParameters.RabbitMQBrokerParameters.VirtualHost + } + f11.RabbitMQBrokerParameters = f11f5 + } + if resp.SourceParameters.SelfManagedKafkaParameters != nil { + f11f6 := &svcapitypes.PipeSourceSelfManagedKafkaParameters{} + if resp.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers != nil { + f11f6f0 := []*string{} + for _, f11f6f0iter := range resp.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers { + var f11f6f0elem string + f11f6f0elem = *f11f6f0iter + f11f6f0 = append(f11f6f0, &f11f6f0elem) + } + f11f6.AdditionalBootstrapServers = f11f6f0 + } + if resp.SourceParameters.SelfManagedKafkaParameters.BatchSize != nil { + f11f6.BatchSize = resp.SourceParameters.SelfManagedKafkaParameters.BatchSize + } + if resp.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID != nil { + f11f6.ConsumerGroupID = resp.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID + } + if resp.SourceParameters.SelfManagedKafkaParameters.Credentials != nil { + f11f6f3 := &svcapitypes.SelfManagedKafkaAccessConfigurationCredentials{} + if resp.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != nil { + f11f6f3.BasicAuth = resp.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth + } + if resp.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTlsAuth != nil { + f11f6f3.ClientCertificateTLSAuth = 
resp.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTlsAuth + } + if resp.SourceParameters.SelfManagedKafkaParameters.Credentials.SaslScram256Auth != nil { + f11f6f3.SASLSCRAM256Auth = resp.SourceParameters.SelfManagedKafkaParameters.Credentials.SaslScram256Auth + } + if resp.SourceParameters.SelfManagedKafkaParameters.Credentials.SaslScram512Auth != nil { + f11f6f3.SASLSCRAM512Auth = resp.SourceParameters.SelfManagedKafkaParameters.Credentials.SaslScram512Auth + } + f11f6.Credentials = f11f6f3 + } + if resp.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f11f6.MaximumBatchingWindowInSeconds = resp.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds + } + if resp.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != nil { + f11f6.ServerRootCaCertificate = resp.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate + } + if resp.SourceParameters.SelfManagedKafkaParameters.StartingPosition != nil { + f11f6.StartingPosition = resp.SourceParameters.SelfManagedKafkaParameters.StartingPosition + } + if resp.SourceParameters.SelfManagedKafkaParameters.TopicName != nil { + f11f6.TopicName = resp.SourceParameters.SelfManagedKafkaParameters.TopicName + } + if resp.SourceParameters.SelfManagedKafkaParameters.Vpc != nil { + f11f6f8 := &svcapitypes.SelfManagedKafkaAccessConfigurationVPC{} + if resp.SourceParameters.SelfManagedKafkaParameters.Vpc.SecurityGroup != nil { + f11f6f8f0 := []*string{} + for _, f11f6f8f0iter := range resp.SourceParameters.SelfManagedKafkaParameters.Vpc.SecurityGroup { + var f11f6f8f0elem string + f11f6f8f0elem = *f11f6f8f0iter + f11f6f8f0 = append(f11f6f8f0, &f11f6f8f0elem) + } + f11f6f8.SecurityGroup = f11f6f8f0 + } + if resp.SourceParameters.SelfManagedKafkaParameters.Vpc.Subnets != nil { + f11f6f8f1 := []*string{} + for _, f11f6f8f1iter := range resp.SourceParameters.SelfManagedKafkaParameters.Vpc.Subnets { + var f11f6f8f1elem string 
+ f11f6f8f1elem = *f11f6f8f1iter + f11f6f8f1 = append(f11f6f8f1, &f11f6f8f1elem) + } + f11f6f8.Subnets = f11f6f8f1 + } + f11f6.VPC = f11f6f8 + } + f11.SelfManagedKafkaParameters = f11f6 + } + if resp.SourceParameters.SqsQueueParameters != nil { + f11f7 := &svcapitypes.PipeSourceSQSQueueParameters{} + if resp.SourceParameters.SqsQueueParameters.BatchSize != nil { + f11f7.BatchSize = resp.SourceParameters.SqsQueueParameters.BatchSize + } + if resp.SourceParameters.SqsQueueParameters.MaximumBatchingWindowInSeconds != nil { + f11f7.MaximumBatchingWindowInSeconds = resp.SourceParameters.SqsQueueParameters.MaximumBatchingWindowInSeconds + } + f11.SQSQueueParameters = f11f7 + } + ko.Spec.SourceParameters = f11 + } else { + ko.Spec.SourceParameters = nil + } + if resp.StateReason != nil { + ko.Status.StateReason = resp.StateReason + } else { + ko.Status.StateReason = nil + } + if resp.Tags != nil { + f13 := map[string]*string{} + for f13key, f13valiter := range resp.Tags { + var f13val string + f13val = *f13valiter + f13[f13key] = &f13val + } + ko.Spec.Tags = f13 + } else { + ko.Spec.Tags = nil + } + if resp.Target != nil { + ko.Spec.Target = resp.Target + } else { + ko.Spec.Target = nil + } + if resp.TargetParameters != nil { + f15 := &svcapitypes.PipeTargetParameters{} + if resp.TargetParameters.BatchJobParameters != nil { + f15f0 := &svcapitypes.PipeTargetBatchJobParameters{} + if resp.TargetParameters.BatchJobParameters.ArrayProperties != nil { + f15f0f0 := &svcapitypes.BatchArrayProperties{} + if resp.TargetParameters.BatchJobParameters.ArrayProperties.Size != nil { + f15f0f0.Size = resp.TargetParameters.BatchJobParameters.ArrayProperties.Size + } + f15f0.ArrayProperties = f15f0f0 + } + if resp.TargetParameters.BatchJobParameters.ContainerOverrides != nil { + f15f0f1 := &svcapitypes.BatchContainerOverrides{} + if resp.TargetParameters.BatchJobParameters.ContainerOverrides.Command != nil { + f15f0f1f0 := []*string{} + for _, f15f0f1f0iter := range 
resp.TargetParameters.BatchJobParameters.ContainerOverrides.Command { + var f15f0f1f0elem string + f15f0f1f0elem = *f15f0f1f0iter + f15f0f1f0 = append(f15f0f1f0, &f15f0f1f0elem) + } + f15f0f1.Command = f15f0f1f0 + } + if resp.TargetParameters.BatchJobParameters.ContainerOverrides.Environment != nil { + f15f0f1f1 := []*svcapitypes.BatchEnvironmentVariable{} + for _, f15f0f1f1iter := range resp.TargetParameters.BatchJobParameters.ContainerOverrides.Environment { + f15f0f1f1elem := &svcapitypes.BatchEnvironmentVariable{} + if f15f0f1f1iter.Name != nil { + f15f0f1f1elem.Name = f15f0f1f1iter.Name + } + if f15f0f1f1iter.Value != nil { + f15f0f1f1elem.Value = f15f0f1f1iter.Value + } + f15f0f1f1 = append(f15f0f1f1, f15f0f1f1elem) + } + f15f0f1.Environment = f15f0f1f1 + } + if resp.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != nil { + f15f0f1.InstanceType = resp.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType + } + if resp.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements != nil { + f15f0f1f3 := []*svcapitypes.BatchResourceRequirement{} + for _, f15f0f1f3iter := range resp.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements { + f15f0f1f3elem := &svcapitypes.BatchResourceRequirement{} + if f15f0f1f3iter.Type != nil { + f15f0f1f3elem.Type = f15f0f1f3iter.Type + } + if f15f0f1f3iter.Value != nil { + f15f0f1f3elem.Value = f15f0f1f3iter.Value + } + f15f0f1f3 = append(f15f0f1f3, f15f0f1f3elem) + } + f15f0f1.ResourceRequirements = f15f0f1f3 + } + f15f0.ContainerOverrides = f15f0f1 + } + if resp.TargetParameters.BatchJobParameters.DependsOn != nil { + f15f0f2 := []*svcapitypes.BatchJobDependency{} + for _, f15f0f2iter := range resp.TargetParameters.BatchJobParameters.DependsOn { + f15f0f2elem := &svcapitypes.BatchJobDependency{} + if f15f0f2iter.JobId != nil { + f15f0f2elem.JobID = f15f0f2iter.JobId + } + if f15f0f2iter.Type != nil { + f15f0f2elem.Type = f15f0f2iter.Type + } + f15f0f2 = 
append(f15f0f2, f15f0f2elem) + } + f15f0.DependsOn = f15f0f2 + } + if resp.TargetParameters.BatchJobParameters.JobDefinition != nil { + f15f0.JobDefinition = resp.TargetParameters.BatchJobParameters.JobDefinition + } + if resp.TargetParameters.BatchJobParameters.JobName != nil { + f15f0.JobName = resp.TargetParameters.BatchJobParameters.JobName + } + if resp.TargetParameters.BatchJobParameters.Parameters != nil { + f15f0f5 := map[string]*string{} + for f15f0f5key, f15f0f5valiter := range resp.TargetParameters.BatchJobParameters.Parameters { + var f15f0f5val string + f15f0f5val = *f15f0f5valiter + f15f0f5[f15f0f5key] = &f15f0f5val + } + f15f0.Parameters = f15f0f5 + } + if resp.TargetParameters.BatchJobParameters.RetryStrategy != nil { + f15f0f6 := &svcapitypes.BatchRetryStrategy{} + if resp.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != nil { + f15f0f6.Attempts = resp.TargetParameters.BatchJobParameters.RetryStrategy.Attempts + } + f15f0.RetryStrategy = f15f0f6 + } + f15.BatchJobParameters = f15f0 + } + if resp.TargetParameters.CloudWatchLogsParameters != nil { + f15f1 := &svcapitypes.PipeTargetCloudWatchLogsParameters{} + if resp.TargetParameters.CloudWatchLogsParameters.LogStreamName != nil { + f15f1.LogStreamName = resp.TargetParameters.CloudWatchLogsParameters.LogStreamName + } + if resp.TargetParameters.CloudWatchLogsParameters.Timestamp != nil { + f15f1.Timestamp = resp.TargetParameters.CloudWatchLogsParameters.Timestamp + } + f15.CloudWatchLogsParameters = f15f1 + } + if resp.TargetParameters.EcsTaskParameters != nil { + f15f2 := &svcapitypes.PipeTargetECSTaskParameters{} + if resp.TargetParameters.EcsTaskParameters.CapacityProviderStrategy != nil { + f15f2f0 := []*svcapitypes.CapacityProviderStrategyItem{} + for _, f15f2f0iter := range resp.TargetParameters.EcsTaskParameters.CapacityProviderStrategy { + f15f2f0elem := &svcapitypes.CapacityProviderStrategyItem{} + if f15f2f0iter.Base != nil { + f15f2f0elem.Base = f15f2f0iter.Base + } + if 
f15f2f0iter.CapacityProvider != nil { + f15f2f0elem.CapacityProvider = f15f2f0iter.CapacityProvider + } + if f15f2f0iter.Weight != nil { + f15f2f0elem.Weight = f15f2f0iter.Weight + } + f15f2f0 = append(f15f2f0, f15f2f0elem) + } + f15f2.CapacityProviderStrategy = f15f2f0 + } + if resp.TargetParameters.EcsTaskParameters.EnableECSManagedTags != nil { + f15f2.EnableECSManagedTags = resp.TargetParameters.EcsTaskParameters.EnableECSManagedTags + } + if resp.TargetParameters.EcsTaskParameters.EnableExecuteCommand != nil { + f15f2.EnableExecuteCommand = resp.TargetParameters.EcsTaskParameters.EnableExecuteCommand + } + if resp.TargetParameters.EcsTaskParameters.Group != nil { + f15f2.Group = resp.TargetParameters.EcsTaskParameters.Group + } + if resp.TargetParameters.EcsTaskParameters.LaunchType != nil { + f15f2.LaunchType = resp.TargetParameters.EcsTaskParameters.LaunchType + } + if resp.TargetParameters.EcsTaskParameters.NetworkConfiguration != nil { + f15f2f5 := &svcapitypes.NetworkConfiguration{} + if resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration != nil { + f15f2f5f0 := &svcapitypes.AWSVPCConfiguration{} + if resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.AssignPublicIp != nil { + f15f2f5f0.AssignPublicIP = resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.AssignPublicIp + } + if resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.SecurityGroups != nil { + f15f2f5f0f1 := []*string{} + for _, f15f2f5f0f1iter := range resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.SecurityGroups { + var f15f2f5f0f1elem string + f15f2f5f0f1elem = *f15f2f5f0f1iter + f15f2f5f0f1 = append(f15f2f5f0f1, &f15f2f5f0f1elem) + } + f15f2f5f0.SecurityGroups = f15f2f5f0f1 + } + if resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.Subnets != nil { + f15f2f5f0f2 := []*string{} + for _, f15f2f5f0f2iter := range 
resp.TargetParameters.EcsTaskParameters.NetworkConfiguration.AwsvpcConfiguration.Subnets { + var f15f2f5f0f2elem string + f15f2f5f0f2elem = *f15f2f5f0f2iter + f15f2f5f0f2 = append(f15f2f5f0f2, &f15f2f5f0f2elem) + } + f15f2f5f0.Subnets = f15f2f5f0f2 + } + f15f2f5.AWSVPCConfiguration = f15f2f5f0 + } + f15f2.NetworkConfiguration = f15f2f5 + } + if resp.TargetParameters.EcsTaskParameters.Overrides != nil { + f15f2f6 := &svcapitypes.ECSTaskOverride{} + if resp.TargetParameters.EcsTaskParameters.Overrides.ContainerOverrides != nil { + f15f2f6f0 := []*svcapitypes.ECSContainerOverride{} + for _, f15f2f6f0iter := range resp.TargetParameters.EcsTaskParameters.Overrides.ContainerOverrides { + f15f2f6f0elem := &svcapitypes.ECSContainerOverride{} + if f15f2f6f0iter.Command != nil { + f15f2f6f0elemf0 := []*string{} + for _, f15f2f6f0elemf0iter := range f15f2f6f0iter.Command { + var f15f2f6f0elemf0elem string + f15f2f6f0elemf0elem = *f15f2f6f0elemf0iter + f15f2f6f0elemf0 = append(f15f2f6f0elemf0, &f15f2f6f0elemf0elem) + } + f15f2f6f0elem.Command = f15f2f6f0elemf0 + } + if f15f2f6f0iter.Cpu != nil { + f15f2f6f0elem.CPU = f15f2f6f0iter.Cpu + } + if f15f2f6f0iter.Environment != nil { + f15f2f6f0elemf2 := []*svcapitypes.ECSEnvironmentVariable{} + for _, f15f2f6f0elemf2iter := range f15f2f6f0iter.Environment { + f15f2f6f0elemf2elem := &svcapitypes.ECSEnvironmentVariable{} + if f15f2f6f0elemf2iter.Name != nil { + f15f2f6f0elemf2elem.Name = f15f2f6f0elemf2iter.Name + } + if f15f2f6f0elemf2iter.Value != nil { + f15f2f6f0elemf2elem.Value = f15f2f6f0elemf2iter.Value + } + f15f2f6f0elemf2 = append(f15f2f6f0elemf2, f15f2f6f0elemf2elem) + } + f15f2f6f0elem.Environment = f15f2f6f0elemf2 + } + if f15f2f6f0iter.EnvironmentFiles != nil { + f15f2f6f0elemf3 := []*svcapitypes.ECSEnvironmentFile{} + for _, f15f2f6f0elemf3iter := range f15f2f6f0iter.EnvironmentFiles { + f15f2f6f0elemf3elem := &svcapitypes.ECSEnvironmentFile{} + if f15f2f6f0elemf3iter.Type != nil { + f15f2f6f0elemf3elem.Type = 
f15f2f6f0elemf3iter.Type + } + if f15f2f6f0elemf3iter.Value != nil { + f15f2f6f0elemf3elem.Value = f15f2f6f0elemf3iter.Value + } + f15f2f6f0elemf3 = append(f15f2f6f0elemf3, f15f2f6f0elemf3elem) + } + f15f2f6f0elem.EnvironmentFiles = f15f2f6f0elemf3 + } + if f15f2f6f0iter.Memory != nil { + f15f2f6f0elem.Memory = f15f2f6f0iter.Memory + } + if f15f2f6f0iter.MemoryReservation != nil { + f15f2f6f0elem.MemoryReservation = f15f2f6f0iter.MemoryReservation + } + if f15f2f6f0iter.Name != nil { + f15f2f6f0elem.Name = f15f2f6f0iter.Name + } + if f15f2f6f0iter.ResourceRequirements != nil { + f15f2f6f0elemf7 := []*svcapitypes.ECSResourceRequirement{} + for _, f15f2f6f0elemf7iter := range f15f2f6f0iter.ResourceRequirements { + f15f2f6f0elemf7elem := &svcapitypes.ECSResourceRequirement{} + if f15f2f6f0elemf7iter.Type != nil { + f15f2f6f0elemf7elem.Type = f15f2f6f0elemf7iter.Type + } + if f15f2f6f0elemf7iter.Value != nil { + f15f2f6f0elemf7elem.Value = f15f2f6f0elemf7iter.Value + } + f15f2f6f0elemf7 = append(f15f2f6f0elemf7, f15f2f6f0elemf7elem) + } + f15f2f6f0elem.ResourceRequirements = f15f2f6f0elemf7 + } + f15f2f6f0 = append(f15f2f6f0, f15f2f6f0elem) + } + f15f2f6.ContainerOverrides = f15f2f6f0 + } + if resp.TargetParameters.EcsTaskParameters.Overrides.Cpu != nil { + f15f2f6.CPU = resp.TargetParameters.EcsTaskParameters.Overrides.Cpu + } + if resp.TargetParameters.EcsTaskParameters.Overrides.EphemeralStorage != nil { + f15f2f6f2 := &svcapitypes.ECSEphemeralStorage{} + if resp.TargetParameters.EcsTaskParameters.Overrides.EphemeralStorage.SizeInGiB != nil { + f15f2f6f2.SizeInGiB = resp.TargetParameters.EcsTaskParameters.Overrides.EphemeralStorage.SizeInGiB + } + f15f2f6.EphemeralStorage = f15f2f6f2 + } + if resp.TargetParameters.EcsTaskParameters.Overrides.ExecutionRoleArn != nil { + f15f2f6.ExecutionRoleARN = resp.TargetParameters.EcsTaskParameters.Overrides.ExecutionRoleArn + } + if resp.TargetParameters.EcsTaskParameters.Overrides.InferenceAcceleratorOverrides != nil { + 
f15f2f6f4 := []*svcapitypes.ECSInferenceAcceleratorOverride{} + for _, f15f2f6f4iter := range resp.TargetParameters.EcsTaskParameters.Overrides.InferenceAcceleratorOverrides { + f15f2f6f4elem := &svcapitypes.ECSInferenceAcceleratorOverride{} + if f15f2f6f4iter.DeviceName != nil { + f15f2f6f4elem.DeviceName = f15f2f6f4iter.DeviceName + } + if f15f2f6f4iter.DeviceType != nil { + f15f2f6f4elem.DeviceType = f15f2f6f4iter.DeviceType + } + f15f2f6f4 = append(f15f2f6f4, f15f2f6f4elem) + } + f15f2f6.InferenceAcceleratorOverrides = f15f2f6f4 + } + if resp.TargetParameters.EcsTaskParameters.Overrides.Memory != nil { + f15f2f6.Memory = resp.TargetParameters.EcsTaskParameters.Overrides.Memory + } + if resp.TargetParameters.EcsTaskParameters.Overrides.TaskRoleArn != nil { + f15f2f6.TaskRoleARN = resp.TargetParameters.EcsTaskParameters.Overrides.TaskRoleArn + } + f15f2.Overrides = f15f2f6 + } + if resp.TargetParameters.EcsTaskParameters.PlacementConstraints != nil { + f15f2f7 := []*svcapitypes.PlacementConstraint{} + for _, f15f2f7iter := range resp.TargetParameters.EcsTaskParameters.PlacementConstraints { + f15f2f7elem := &svcapitypes.PlacementConstraint{} + if f15f2f7iter.Expression != nil { + f15f2f7elem.Expression = f15f2f7iter.Expression + } + if f15f2f7iter.Type != nil { + f15f2f7elem.Type = f15f2f7iter.Type + } + f15f2f7 = append(f15f2f7, f15f2f7elem) + } + f15f2.PlacementConstraints = f15f2f7 + } + if resp.TargetParameters.EcsTaskParameters.PlacementStrategy != nil { + f15f2f8 := []*svcapitypes.PlacementStrategy{} + for _, f15f2f8iter := range resp.TargetParameters.EcsTaskParameters.PlacementStrategy { + f15f2f8elem := &svcapitypes.PlacementStrategy{} + if f15f2f8iter.Field != nil { + f15f2f8elem.Field = f15f2f8iter.Field + } + if f15f2f8iter.Type != nil { + f15f2f8elem.Type = f15f2f8iter.Type + } + f15f2f8 = append(f15f2f8, f15f2f8elem) + } + f15f2.PlacementStrategy = f15f2f8 + } + if resp.TargetParameters.EcsTaskParameters.PlatformVersion != nil { + 
f15f2.PlatformVersion = resp.TargetParameters.EcsTaskParameters.PlatformVersion + } + if resp.TargetParameters.EcsTaskParameters.PropagateTags != nil { + f15f2.PropagateTags = resp.TargetParameters.EcsTaskParameters.PropagateTags + } + if resp.TargetParameters.EcsTaskParameters.ReferenceId != nil { + f15f2.ReferenceID = resp.TargetParameters.EcsTaskParameters.ReferenceId + } + if resp.TargetParameters.EcsTaskParameters.Tags != nil { + f15f2f12 := []*svcapitypes.Tag{} + for _, f15f2f12iter := range resp.TargetParameters.EcsTaskParameters.Tags { + f15f2f12elem := &svcapitypes.Tag{} + if f15f2f12iter.Key != nil { + f15f2f12elem.Key = f15f2f12iter.Key + } + if f15f2f12iter.Value != nil { + f15f2f12elem.Value = f15f2f12iter.Value + } + f15f2f12 = append(f15f2f12, f15f2f12elem) + } + f15f2.Tags = f15f2f12 + } + if resp.TargetParameters.EcsTaskParameters.TaskCount != nil { + f15f2.TaskCount = resp.TargetParameters.EcsTaskParameters.TaskCount + } + if resp.TargetParameters.EcsTaskParameters.TaskDefinitionArn != nil { + f15f2.TaskDefinitionARN = resp.TargetParameters.EcsTaskParameters.TaskDefinitionArn + } + f15.ECSTaskParameters = f15f2 + } + if resp.TargetParameters.EventBridgeEventBusParameters != nil { + f15f3 := &svcapitypes.PipeTargetEventBridgeEventBusParameters{} + if resp.TargetParameters.EventBridgeEventBusParameters.DetailType != nil { + f15f3.DetailType = resp.TargetParameters.EventBridgeEventBusParameters.DetailType + } + if resp.TargetParameters.EventBridgeEventBusParameters.EndpointId != nil { + f15f3.EndpointID = resp.TargetParameters.EventBridgeEventBusParameters.EndpointId + } + if resp.TargetParameters.EventBridgeEventBusParameters.Resources != nil { + f15f3f2 := []*string{} + for _, f15f3f2iter := range resp.TargetParameters.EventBridgeEventBusParameters.Resources { + var f15f3f2elem string + f15f3f2elem = *f15f3f2iter + f15f3f2 = append(f15f3f2, &f15f3f2elem) + } + f15f3.Resources = f15f3f2 + } + if 
resp.TargetParameters.EventBridgeEventBusParameters.Source != nil { + f15f3.Source = resp.TargetParameters.EventBridgeEventBusParameters.Source + } + if resp.TargetParameters.EventBridgeEventBusParameters.Time != nil { + f15f3.Time = resp.TargetParameters.EventBridgeEventBusParameters.Time + } + f15.EventBridgeEventBusParameters = f15f3 + } + if resp.TargetParameters.HttpParameters != nil { + f15f4 := &svcapitypes.PipeTargetHTTPParameters{} + if resp.TargetParameters.HttpParameters.HeaderParameters != nil { + f15f4f0 := map[string]*string{} + for f15f4f0key, f15f4f0valiter := range resp.TargetParameters.HttpParameters.HeaderParameters { + var f15f4f0val string + f15f4f0val = *f15f4f0valiter + f15f4f0[f15f4f0key] = &f15f4f0val + } + f15f4.HeaderParameters = f15f4f0 + } + if resp.TargetParameters.HttpParameters.PathParameterValues != nil { + f15f4f1 := []*string{} + for _, f15f4f1iter := range resp.TargetParameters.HttpParameters.PathParameterValues { + var f15f4f1elem string + f15f4f1elem = *f15f4f1iter + f15f4f1 = append(f15f4f1, &f15f4f1elem) + } + f15f4.PathParameterValues = f15f4f1 + } + if resp.TargetParameters.HttpParameters.QueryStringParameters != nil { + f15f4f2 := map[string]*string{} + for f15f4f2key, f15f4f2valiter := range resp.TargetParameters.HttpParameters.QueryStringParameters { + var f15f4f2val string + f15f4f2val = *f15f4f2valiter + f15f4f2[f15f4f2key] = &f15f4f2val + } + f15f4.QueryStringParameters = f15f4f2 + } + f15.HTTPParameters = f15f4 + } + if resp.TargetParameters.InputTemplate != nil { + f15.InputTemplate = resp.TargetParameters.InputTemplate + } + if resp.TargetParameters.KinesisStreamParameters != nil { + f15f6 := &svcapitypes.PipeTargetKinesisStreamParameters{} + if resp.TargetParameters.KinesisStreamParameters.PartitionKey != nil { + f15f6.PartitionKey = resp.TargetParameters.KinesisStreamParameters.PartitionKey + } + f15.KinesisStreamParameters = f15f6 + } + if resp.TargetParameters.LambdaFunctionParameters != nil { + f15f7 := 
&svcapitypes.PipeTargetLambdaFunctionParameters{} + if resp.TargetParameters.LambdaFunctionParameters.InvocationType != nil { + f15f7.InvocationType = resp.TargetParameters.LambdaFunctionParameters.InvocationType + } + f15.LambdaFunctionParameters = f15f7 + } + if resp.TargetParameters.RedshiftDataParameters != nil { + f15f8 := &svcapitypes.PipeTargetRedshiftDataParameters{} + if resp.TargetParameters.RedshiftDataParameters.Database != nil { + f15f8.Database = resp.TargetParameters.RedshiftDataParameters.Database + } + if resp.TargetParameters.RedshiftDataParameters.DbUser != nil { + f15f8.DBUser = resp.TargetParameters.RedshiftDataParameters.DbUser + } + if resp.TargetParameters.RedshiftDataParameters.SecretManagerArn != nil { + f15f8.SecretManagerARN = resp.TargetParameters.RedshiftDataParameters.SecretManagerArn + } + if resp.TargetParameters.RedshiftDataParameters.Sqls != nil { + f15f8f3 := []*string{} + for _, f15f8f3iter := range resp.TargetParameters.RedshiftDataParameters.Sqls { + var f15f8f3elem string + f15f8f3elem = *f15f8f3iter + f15f8f3 = append(f15f8f3, &f15f8f3elem) + } + f15f8.SQLs = f15f8f3 + } + if resp.TargetParameters.RedshiftDataParameters.StatementName != nil { + f15f8.StatementName = resp.TargetParameters.RedshiftDataParameters.StatementName + } + if resp.TargetParameters.RedshiftDataParameters.WithEvent != nil { + f15f8.WithEvent = resp.TargetParameters.RedshiftDataParameters.WithEvent + } + f15.RedshiftDataParameters = f15f8 + } + if resp.TargetParameters.SageMakerPipelineParameters != nil { + f15f9 := &svcapitypes.PipeTargetSageMakerPipelineParameters{} + if resp.TargetParameters.SageMakerPipelineParameters.PipelineParameterList != nil { + f15f9f0 := []*svcapitypes.SageMakerPipelineParameter{} + for _, f15f9f0iter := range resp.TargetParameters.SageMakerPipelineParameters.PipelineParameterList { + f15f9f0elem := &svcapitypes.SageMakerPipelineParameter{} + if f15f9f0iter.Name != nil { + f15f9f0elem.Name = f15f9f0iter.Name + } + if 
f15f9f0iter.Value != nil { + f15f9f0elem.Value = f15f9f0iter.Value + } + f15f9f0 = append(f15f9f0, f15f9f0elem) + } + f15f9.PipelineParameterList = f15f9f0 + } + f15.SageMakerPipelineParameters = f15f9 + } + if resp.TargetParameters.SqsQueueParameters != nil { + f15f10 := &svcapitypes.PipeTargetSQSQueueParameters{} + if resp.TargetParameters.SqsQueueParameters.MessageDeduplicationId != nil { + f15f10.MessageDeduplicationID = resp.TargetParameters.SqsQueueParameters.MessageDeduplicationId + } + if resp.TargetParameters.SqsQueueParameters.MessageGroupId != nil { + f15f10.MessageGroupID = resp.TargetParameters.SqsQueueParameters.MessageGroupId + } + f15.SQSQueueParameters = f15f10 + } + if resp.TargetParameters.StepFunctionStateMachineParameters != nil { + f15f11 := &svcapitypes.PipeTargetStateMachineParameters{} + if resp.TargetParameters.StepFunctionStateMachineParameters.InvocationType != nil { + f15f11.InvocationType = resp.TargetParameters.StepFunctionStateMachineParameters.InvocationType + } + f15.StepFunctionStateMachineParameters = f15f11 + } + ko.Spec.TargetParameters = f15 + } else { + ko.Spec.TargetParameters = nil + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// requiredFieldsMissingFromReadOneInput returns true if there are any fields +// for the ReadOne Input shape that are required but not present in the +// resource's Spec or Status +func (rm *resourceManager) requiredFieldsMissingFromReadOneInput( + r *resource, +) bool { + return r.ko.Spec.Name == nil + +} + +// newDescribeRequestPayload returns SDK-specific struct for the HTTP request +// payload of the Describe API call for the resource +func (rm *resourceManager) newDescribeRequestPayload( + r *resource, +) (*svcsdk.DescribePipeInput, error) { + res := &svcsdk.DescribePipeInput{} + + if r.ko.Spec.Name != nil { + res.SetName(*r.ko.Spec.Name) + } + + return res, nil +} + +// sdkCreate creates the supplied resource in the backend AWS service API and +// returns a copy of the 
resource with resource fields (in both Spec and +// Status) filled in with values from the CREATE API operation's Output shape. +func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.CreatePipeOutput + _ = resp + resp, err = rm.sdkapi.CreatePipeWithContext(ctx, input) + rm.metrics.RecordAPICall("CREATE", "CreatePipe", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CreationTime != nil { + ko.Status.CreationTime = &metav1.Time{*resp.CreationTime} + } else { + ko.Status.CreationTime = nil + } + if resp.CurrentState != nil { + ko.Status.CurrentState = resp.CurrentState + } else { + ko.Status.CurrentState = nil + } + if resp.DesiredState != nil { + ko.Spec.DesiredState = resp.DesiredState + } else { + ko.Spec.DesiredState = nil + } + if resp.LastModifiedTime != nil { + ko.Status.LastModifiedTime = &metav1.Time{*resp.LastModifiedTime} + } else { + ko.Status.LastModifiedTime = nil + } + if resp.Name != nil { + ko.Spec.Name = resp.Name + } else { + ko.Spec.Name = nil + } + + rm.setStatusDefaults(ko) + if !pipeAvailable(&resource{ko}) { + return &resource{ko}, requeueWaitWhileCreating + } + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) 
newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.CreatePipeInput, error) { + res := &svcsdk.CreatePipeInput{} + + if r.ko.Spec.Description != nil { + res.SetDescription(*r.ko.Spec.Description) + } + if r.ko.Spec.DesiredState != nil { + res.SetDesiredState(*r.ko.Spec.DesiredState) + } + if r.ko.Spec.Enrichment != nil { + res.SetEnrichment(*r.ko.Spec.Enrichment) + } + if r.ko.Spec.EnrichmentParameters != nil { + f3 := &svcsdk.PipeEnrichmentParameters{} + if r.ko.Spec.EnrichmentParameters.HTTPParameters != nil { + f3f0 := &svcsdk.PipeEnrichmentHttpParameters{} + if r.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters != nil { + f3f0f0 := map[string]*string{} + for f3f0f0key, f3f0f0valiter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters { + var f3f0f0val string + f3f0f0val = *f3f0f0valiter + f3f0f0[f3f0f0key] = &f3f0f0val + } + f3f0.SetHeaderParameters(f3f0f0) + } + if r.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues != nil { + f3f0f1 := []*string{} + for _, f3f0f1iter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues { + var f3f0f1elem string + f3f0f1elem = *f3f0f1iter + f3f0f1 = append(f3f0f1, &f3f0f1elem) + } + f3f0.SetPathParameterValues(f3f0f1) + } + if r.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters != nil { + f3f0f2 := map[string]*string{} + for f3f0f2key, f3f0f2valiter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters { + var f3f0f2val string + f3f0f2val = *f3f0f2valiter + f3f0f2[f3f0f2key] = &f3f0f2val + } + f3f0.SetQueryStringParameters(f3f0f2) + } + f3.SetHttpParameters(f3f0) + } + if r.ko.Spec.EnrichmentParameters.InputTemplate != nil { + f3.SetInputTemplate(*r.ko.Spec.EnrichmentParameters.InputTemplate) + } + res.SetEnrichmentParameters(f3) + } + if r.ko.Spec.Name != nil { + res.SetName(*r.ko.Spec.Name) + } + if r.ko.Spec.RoleARN != nil { + res.SetRoleArn(*r.ko.Spec.RoleARN) + } + if r.ko.Spec.Source != nil { 
+ res.SetSource(*r.ko.Spec.Source) + } + if r.ko.Spec.SourceParameters != nil { + f7 := &svcsdk.PipeSourceParameters{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters != nil { + f7f0 := &svcsdk.PipeSourceActiveMQBrokerParameters{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize != nil { + f7f0.SetBatchSize(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials != nil { + f7f0f1 := &svcsdk.MQBrokerAccessCredentials{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != nil { + f7f0f1.SetBasicAuth(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth) + } + f7f0.SetCredentials(f7f0f1) + } + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f7f0.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName != nil { + f7f0.SetQueueName(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.QueueName) + } + f7.SetActiveMQBrokerParameters(f7f0) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters != nil { + f7f1 := &svcsdk.PipeSourceDynamoDBStreamParameters{} + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize != nil { + f7f1.SetBatchSize(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig != nil { + f7f1f1 := &svcsdk.DeadLetterConfig{} + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN != nil { + f7f1f1.SetArn(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN) + } + f7f1.SetDeadLetterConfig(f7f1f1) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != nil { + 
f7f1.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != nil { + f7f1.SetMaximumRecordAgeInSeconds(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != nil { + f7f1.SetMaximumRetryAttempts(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != nil { + f7f1.SetOnPartialBatchItemFailure(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != nil { + f7f1.SetParallelizationFactor(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition != nil { + f7f1.SetStartingPosition(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.StartingPosition) + } + f7.SetDynamoDBStreamParameters(f7f1) + } + if r.ko.Spec.SourceParameters.FilterCriteria != nil { + f7f2 := &svcsdk.FilterCriteria{} + if r.ko.Spec.SourceParameters.FilterCriteria.Filters != nil { + f7f2f0 := []*svcsdk.Filter{} + for _, f7f2f0iter := range r.ko.Spec.SourceParameters.FilterCriteria.Filters { + f7f2f0elem := &svcsdk.Filter{} + if f7f2f0iter.Pattern != nil { + f7f2f0elem.SetPattern(*f7f2f0iter.Pattern) + } + f7f2f0 = append(f7f2f0, f7f2f0elem) + } + f7f2.SetFilters(f7f2f0) + } + f7.SetFilterCriteria(f7f2) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters != nil { + f7f3 := &svcsdk.PipeSourceKinesisStreamParameters{} + if r.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize != nil { + f7f3.SetBatchSize(*r.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize) + } + if 
r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig != nil { + f7f3f1 := &svcsdk.DeadLetterConfig{} + if r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN != nil { + f7f3f1.SetArn(*r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN) + } + f7f3.SetDeadLetterConfig(f7f3f1) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != nil { + f7f3.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != nil { + f7f3.SetMaximumRecordAgeInSeconds(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != nil { + f7f3.SetMaximumRetryAttempts(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != nil { + f7f3.SetOnPartialBatchItemFailure(*r.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor != nil { + f7f3.SetParallelizationFactor(*r.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition != nil { + f7f3.SetStartingPosition(*r.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPosition) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp != nil { + f7f3.SetStartingPositionTimestamp(r.ko.Spec.SourceParameters.KinesisStreamParameters.StartingPositionTimestamp.Time) + } + f7.SetKinesisStreamParameters(f7f3) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters != nil { + f7f4 := &svcsdk.PipeSourceManagedStreamingKafkaParameters{} + if 
r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != nil { + f7f4.SetBatchSize(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID != nil { + f7f4.SetConsumerGroupID(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.ConsumerGroupID) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials != nil { + f7f4f2 := &svcsdk.MSKAccessCredentials{} + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + f7f4f2.SetClientCertificateTlsAuth(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + f7f4f2.SetSaslScram512Auth(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth) + } + f7f4.SetCredentials(f7f4f2) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f7f4.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition != nil { + f7f4.SetStartingPosition(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.StartingPosition) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName != nil { + f7f4.SetTopicName(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.TopicName) + } + f7.SetManagedStreamingKafkaParameters(f7f4) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters != nil { + f7f5 := &svcsdk.PipeSourceRabbitMQBrokerParameters{} + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize != nil { + f7f5.SetBatchSize(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize) + } + if 
r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials != nil { + f7f5f1 := &svcsdk.MQBrokerAccessCredentials{} + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != nil { + f7f5f1.SetBasicAuth(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth) + } + f7f5.SetCredentials(f7f5f1) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f7f5.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName != nil { + f7f5.SetQueueName(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.QueueName) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost != nil { + f7f5.SetVirtualHost(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.VirtualHost) + } + f7.SetRabbitMQBrokerParameters(f7f5) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters != nil { + f7f6 := &svcsdk.PipeSourceSelfManagedKafkaParameters{} + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers != nil { + f7f6f0 := []*string{} + for _, f7f6f0iter := range r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.AdditionalBootstrapServers { + var f7f6f0elem string + f7f6f0elem = *f7f6f0iter + f7f6f0 = append(f7f6f0, &f7f6f0elem) + } + f7f6.SetAdditionalBootstrapServers(f7f6f0) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize != nil { + f7f6.SetBatchSize(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID != nil { + f7f6.SetConsumerGroupID(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ConsumerGroupID) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials != nil { + f7f6f3 := &svcsdk.SelfManagedKafkaAccessConfigurationCredentials{} + if 
r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != nil { + f7f6f3.SetBasicAuth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + f7f6f3.SetClientCertificateTlsAuth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth != nil { + f7f6f3.SetSaslScram256Auth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + f7f6f3.SetSaslScram512Auth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth) + } + f7f6.SetCredentials(f7f6f3) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f7f6.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != nil { + f7f6.SetServerRootCaCertificate(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition != nil { + f7f6.SetStartingPosition(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.StartingPosition) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName != nil { + f7f6.SetTopicName(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.TopicName) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC != nil { + f7f6f8 := &svcsdk.SelfManagedKafkaAccessConfigurationVpc{} + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup != nil { + f7f6f8f0 := []*string{} + for _, f7f6f8f0iter := range 
r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup { + var f7f6f8f0elem string + f7f6f8f0elem = *f7f6f8f0iter + f7f6f8f0 = append(f7f6f8f0, &f7f6f8f0elem) + } + f7f6f8.SetSecurityGroup(f7f6f8f0) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets != nil { + f7f6f8f1 := []*string{} + for _, f7f6f8f1iter := range r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets { + var f7f6f8f1elem string + f7f6f8f1elem = *f7f6f8f1iter + f7f6f8f1 = append(f7f6f8f1, &f7f6f8f1elem) + } + f7f6f8.SetSubnets(f7f6f8f1) + } + f7f6.SetVpc(f7f6f8) + } + f7.SetSelfManagedKafkaParameters(f7f6) + } + if r.ko.Spec.SourceParameters.SQSQueueParameters != nil { + f7f7 := &svcsdk.PipeSourceSqsQueueParameters{} + if r.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize != nil { + f7f7.SetBatchSize(*r.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds != nil { + f7f7.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds) + } + f7.SetSqsQueueParameters(f7f7) + } + res.SetSourceParameters(f7) + } + if r.ko.Spec.Tags != nil { + f8 := map[string]*string{} + for f8key, f8valiter := range r.ko.Spec.Tags { + var f8val string + f8val = *f8valiter + f8[f8key] = &f8val + } + res.SetTags(f8) + } + if r.ko.Spec.Target != nil { + res.SetTarget(*r.ko.Spec.Target) + } + if r.ko.Spec.TargetParameters != nil { + f10 := &svcsdk.PipeTargetParameters{} + if r.ko.Spec.TargetParameters.BatchJobParameters != nil { + f10f0 := &svcsdk.PipeTargetBatchJobParameters{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties != nil { + f10f0f0 := &svcsdk.BatchArrayProperties{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size != nil { + f10f0f0.SetSize(*r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size) + } + f10f0.SetArrayProperties(f10f0f0) + } + if 
r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides != nil { + f10f0f1 := &svcsdk.BatchContainerOverrides{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command != nil { + f10f0f1f0 := []*string{} + for _, f10f0f1f0iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command { + var f10f0f1f0elem string + f10f0f1f0elem = *f10f0f1f0iter + f10f0f1f0 = append(f10f0f1f0, &f10f0f1f0elem) + } + f10f0f1.SetCommand(f10f0f1f0) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment != nil { + f10f0f1f1 := []*svcsdk.BatchEnvironmentVariable{} + for _, f10f0f1f1iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment { + f10f0f1f1elem := &svcsdk.BatchEnvironmentVariable{} + if f10f0f1f1iter.Name != nil { + f10f0f1f1elem.SetName(*f10f0f1f1iter.Name) + } + if f10f0f1f1iter.Value != nil { + f10f0f1f1elem.SetValue(*f10f0f1f1iter.Value) + } + f10f0f1f1 = append(f10f0f1f1, f10f0f1f1elem) + } + f10f0f1.SetEnvironment(f10f0f1f1) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != nil { + f10f0f1.SetInstanceType(*r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements != nil { + f10f0f1f3 := []*svcsdk.BatchResourceRequirement{} + for _, f10f0f1f3iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements { + f10f0f1f3elem := &svcsdk.BatchResourceRequirement{} + if f10f0f1f3iter.Type != nil { + f10f0f1f3elem.SetType(*f10f0f1f3iter.Type) + } + if f10f0f1f3iter.Value != nil { + f10f0f1f3elem.SetValue(*f10f0f1f3iter.Value) + } + f10f0f1f3 = append(f10f0f1f3, f10f0f1f3elem) + } + f10f0f1.SetResourceRequirements(f10f0f1f3) + } + f10f0.SetContainerOverrides(f10f0f1) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.DependsOn != nil { + f10f0f2 := 
[]*svcsdk.BatchJobDependency{} + for _, f10f0f2iter := range r.ko.Spec.TargetParameters.BatchJobParameters.DependsOn { + f10f0f2elem := &svcsdk.BatchJobDependency{} + if f10f0f2iter.JobID != nil { + f10f0f2elem.SetJobId(*f10f0f2iter.JobID) + } + if f10f0f2iter.Type != nil { + f10f0f2elem.SetType(*f10f0f2iter.Type) + } + f10f0f2 = append(f10f0f2, f10f0f2elem) + } + f10f0.SetDependsOn(f10f0f2) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition != nil { + f10f0.SetJobDefinition(*r.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.JobName != nil { + f10f0.SetJobName(*r.ko.Spec.TargetParameters.BatchJobParameters.JobName) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.Parameters != nil { + f10f0f5 := map[string]*string{} + for f10f0f5key, f10f0f5valiter := range r.ko.Spec.TargetParameters.BatchJobParameters.Parameters { + var f10f0f5val string + f10f0f5val = *f10f0f5valiter + f10f0f5[f10f0f5key] = &f10f0f5val + } + f10f0.SetParameters(f10f0f5) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy != nil { + f10f0f6 := &svcsdk.BatchRetryStrategy{} + if r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != nil { + f10f0f6.SetAttempts(*r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts) + } + f10f0.SetRetryStrategy(f10f0f6) + } + f10.SetBatchJobParameters(f10f0) + } + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters != nil { + f10f1 := &svcsdk.PipeTargetCloudWatchLogsParameters{} + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName != nil { + f10f1.SetLogStreamName(*r.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName) + } + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp != nil { + f10f1.SetTimestamp(*r.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp) + } + f10.SetCloudWatchLogsParameters(f10f1) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters != nil { + f10f2 := 
&svcsdk.PipeTargetEcsTaskParameters{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy != nil { + f10f2f0 := []*svcsdk.CapacityProviderStrategyItem{} + for _, f10f2f0iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy { + f10f2f0elem := &svcsdk.CapacityProviderStrategyItem{} + if f10f2f0iter.Base != nil { + f10f2f0elem.SetBase(*f10f2f0iter.Base) + } + if f10f2f0iter.CapacityProvider != nil { + f10f2f0elem.SetCapacityProvider(*f10f2f0iter.CapacityProvider) + } + if f10f2f0iter.Weight != nil { + f10f2f0elem.SetWeight(*f10f2f0iter.Weight) + } + f10f2f0 = append(f10f2f0, f10f2f0elem) + } + f10f2.SetCapacityProviderStrategy(f10f2f0) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags != nil { + f10f2.SetEnableECSManagedTags(*r.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand != nil { + f10f2.SetEnableExecuteCommand(*r.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Group != nil { + f10f2.SetGroup(*r.ko.Spec.TargetParameters.ECSTaskParameters.Group) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType != nil { + f10f2.SetLaunchType(*r.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration != nil { + f10f2f5 := &svcsdk.NetworkConfiguration{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration != nil { + f10f2f5f0 := &svcsdk.AwsVpcConfiguration{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP != nil { + f10f2f5f0.SetAssignPublicIp(*r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups != nil { + 
f10f2f5f0f1 := []*string{} + for _, f10f2f5f0f1iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups { + var f10f2f5f0f1elem string + f10f2f5f0f1elem = *f10f2f5f0f1iter + f10f2f5f0f1 = append(f10f2f5f0f1, &f10f2f5f0f1elem) + } + f10f2f5f0.SetSecurityGroups(f10f2f5f0f1) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets != nil { + f10f2f5f0f2 := []*string{} + for _, f10f2f5f0f2iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets { + var f10f2f5f0f2elem string + f10f2f5f0f2elem = *f10f2f5f0f2iter + f10f2f5f0f2 = append(f10f2f5f0f2, &f10f2f5f0f2elem) + } + f10f2f5f0.SetSubnets(f10f2f5f0f2) + } + f10f2f5.SetAwsvpcConfiguration(f10f2f5f0) + } + f10f2.SetNetworkConfiguration(f10f2f5) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides != nil { + f10f2f6 := &svcsdk.EcsTaskOverride{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides != nil { + f10f2f6f0 := []*svcsdk.EcsContainerOverride{} + for _, f10f2f6f0iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides { + f10f2f6f0elem := &svcsdk.EcsContainerOverride{} + if f10f2f6f0iter.Command != nil { + f10f2f6f0elemf0 := []*string{} + for _, f10f2f6f0elemf0iter := range f10f2f6f0iter.Command { + var f10f2f6f0elemf0elem string + f10f2f6f0elemf0elem = *f10f2f6f0elemf0iter + f10f2f6f0elemf0 = append(f10f2f6f0elemf0, &f10f2f6f0elemf0elem) + } + f10f2f6f0elem.SetCommand(f10f2f6f0elemf0) + } + if f10f2f6f0iter.CPU != nil { + f10f2f6f0elem.SetCpu(*f10f2f6f0iter.CPU) + } + if f10f2f6f0iter.Environment != nil { + f10f2f6f0elemf2 := []*svcsdk.EcsEnvironmentVariable{} + for _, f10f2f6f0elemf2iter := range f10f2f6f0iter.Environment { + f10f2f6f0elemf2elem := &svcsdk.EcsEnvironmentVariable{} + if f10f2f6f0elemf2iter.Name != nil { + f10f2f6f0elemf2elem.SetName(*f10f2f6f0elemf2iter.Name) + } + if 
f10f2f6f0elemf2iter.Value != nil { + f10f2f6f0elemf2elem.SetValue(*f10f2f6f0elemf2iter.Value) + } + f10f2f6f0elemf2 = append(f10f2f6f0elemf2, f10f2f6f0elemf2elem) + } + f10f2f6f0elem.SetEnvironment(f10f2f6f0elemf2) + } + if f10f2f6f0iter.EnvironmentFiles != nil { + f10f2f6f0elemf3 := []*svcsdk.EcsEnvironmentFile{} + for _, f10f2f6f0elemf3iter := range f10f2f6f0iter.EnvironmentFiles { + f10f2f6f0elemf3elem := &svcsdk.EcsEnvironmentFile{} + if f10f2f6f0elemf3iter.Type != nil { + f10f2f6f0elemf3elem.SetType(*f10f2f6f0elemf3iter.Type) + } + if f10f2f6f0elemf3iter.Value != nil { + f10f2f6f0elemf3elem.SetValue(*f10f2f6f0elemf3iter.Value) + } + f10f2f6f0elemf3 = append(f10f2f6f0elemf3, f10f2f6f0elemf3elem) + } + f10f2f6f0elem.SetEnvironmentFiles(f10f2f6f0elemf3) + } + if f10f2f6f0iter.Memory != nil { + f10f2f6f0elem.SetMemory(*f10f2f6f0iter.Memory) + } + if f10f2f6f0iter.MemoryReservation != nil { + f10f2f6f0elem.SetMemoryReservation(*f10f2f6f0iter.MemoryReservation) + } + if f10f2f6f0iter.Name != nil { + f10f2f6f0elem.SetName(*f10f2f6f0iter.Name) + } + if f10f2f6f0iter.ResourceRequirements != nil { + f10f2f6f0elemf7 := []*svcsdk.EcsResourceRequirement{} + for _, f10f2f6f0elemf7iter := range f10f2f6f0iter.ResourceRequirements { + f10f2f6f0elemf7elem := &svcsdk.EcsResourceRequirement{} + if f10f2f6f0elemf7iter.Type != nil { + f10f2f6f0elemf7elem.SetType(*f10f2f6f0elemf7iter.Type) + } + if f10f2f6f0elemf7iter.Value != nil { + f10f2f6f0elemf7elem.SetValue(*f10f2f6f0elemf7iter.Value) + } + f10f2f6f0elemf7 = append(f10f2f6f0elemf7, f10f2f6f0elemf7elem) + } + f10f2f6f0elem.SetResourceRequirements(f10f2f6f0elemf7) + } + f10f2f6f0 = append(f10f2f6f0, f10f2f6f0elem) + } + f10f2f6.SetContainerOverrides(f10f2f6f0) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU != nil { + f10f2f6.SetCpu(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage != nil { + f10f2f6f2 := 
&svcsdk.EcsEphemeralStorage{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB != nil { + f10f2f6f2.SetSizeInGiB(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB) + } + f10f2f6.SetEphemeralStorage(f10f2f6f2) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN != nil { + f10f2f6.SetExecutionRoleArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides != nil { + f10f2f6f4 := []*svcsdk.EcsInferenceAcceleratorOverride{} + for _, f10f2f6f4iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides { + f10f2f6f4elem := &svcsdk.EcsInferenceAcceleratorOverride{} + if f10f2f6f4iter.DeviceName != nil { + f10f2f6f4elem.SetDeviceName(*f10f2f6f4iter.DeviceName) + } + if f10f2f6f4iter.DeviceType != nil { + f10f2f6f4elem.SetDeviceType(*f10f2f6f4iter.DeviceType) + } + f10f2f6f4 = append(f10f2f6f4, f10f2f6f4elem) + } + f10f2f6.SetInferenceAcceleratorOverrides(f10f2f6f4) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory != nil { + f10f2f6.SetMemory(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN != nil { + f10f2f6.SetTaskRoleArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN) + } + f10f2.SetOverrides(f10f2f6) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints != nil { + f10f2f7 := []*svcsdk.PlacementConstraint{} + for _, f10f2f7iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints { + f10f2f7elem := &svcsdk.PlacementConstraint{} + if f10f2f7iter.Expression != nil { + f10f2f7elem.SetExpression(*f10f2f7iter.Expression) + } + if f10f2f7iter.Type != nil { + f10f2f7elem.SetType(*f10f2f7iter.Type) + } + f10f2f7 = append(f10f2f7, f10f2f7elem) + } + 
f10f2.SetPlacementConstraints(f10f2f7) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy != nil { + f10f2f8 := []*svcsdk.PlacementStrategy{} + for _, f10f2f8iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy { + f10f2f8elem := &svcsdk.PlacementStrategy{} + if f10f2f8iter.Field != nil { + f10f2f8elem.SetField(*f10f2f8iter.Field) + } + if f10f2f8iter.Type != nil { + f10f2f8elem.SetType(*f10f2f8iter.Type) + } + f10f2f8 = append(f10f2f8, f10f2f8elem) + } + f10f2.SetPlacementStrategy(f10f2f8) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion != nil { + f10f2.SetPlatformVersion(*r.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags != nil { + f10f2.SetPropagateTags(*r.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID != nil { + f10f2.SetReferenceId(*r.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Tags != nil { + f10f2f12 := []*svcsdk.Tag{} + for _, f10f2f12iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Tags { + f10f2f12elem := &svcsdk.Tag{} + if f10f2f12iter.Key != nil { + f10f2f12elem.SetKey(*f10f2f12iter.Key) + } + if f10f2f12iter.Value != nil { + f10f2f12elem.SetValue(*f10f2f12iter.Value) + } + f10f2f12 = append(f10f2f12, f10f2f12elem) + } + f10f2.SetTags(f10f2f12) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount != nil { + f10f2.SetTaskCount(*r.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN != nil { + f10f2.SetTaskDefinitionArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN) + } + f10.SetEcsTaskParameters(f10f2) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters != nil { + f10f3 := &svcsdk.PipeTargetEventBridgeEventBusParameters{} + if 
r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType != nil { + f10f3.SetDetailType(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID != nil { + f10f3.SetEndpointId(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources != nil { + f10f3f2 := []*string{} + for _, f10f3f2iter := range r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources { + var f10f3f2elem string + f10f3f2elem = *f10f3f2iter + f10f3f2 = append(f10f3f2, &f10f3f2elem) + } + f10f3.SetResources(f10f3f2) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source != nil { + f10f3.SetSource(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time != nil { + f10f3.SetTime(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time) + } + f10.SetEventBridgeEventBusParameters(f10f3) + } + if r.ko.Spec.TargetParameters.HTTPParameters != nil { + f10f4 := &svcsdk.PipeTargetHttpParameters{} + if r.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters != nil { + f10f4f0 := map[string]*string{} + for f10f4f0key, f10f4f0valiter := range r.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters { + var f10f4f0val string + f10f4f0val = *f10f4f0valiter + f10f4f0[f10f4f0key] = &f10f4f0val + } + f10f4.SetHeaderParameters(f10f4f0) + } + if r.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues != nil { + f10f4f1 := []*string{} + for _, f10f4f1iter := range r.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues { + var f10f4f1elem string + f10f4f1elem = *f10f4f1iter + f10f4f1 = append(f10f4f1, &f10f4f1elem) + } + f10f4.SetPathParameterValues(f10f4f1) + } + if r.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters != nil { + f10f4f2 := map[string]*string{} + for f10f4f2key, f10f4f2valiter 
:= range r.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters { + var f10f4f2val string + f10f4f2val = *f10f4f2valiter + f10f4f2[f10f4f2key] = &f10f4f2val + } + f10f4.SetQueryStringParameters(f10f4f2) + } + f10.SetHttpParameters(f10f4) + } + if r.ko.Spec.TargetParameters.InputTemplate != nil { + f10.SetInputTemplate(*r.ko.Spec.TargetParameters.InputTemplate) + } + if r.ko.Spec.TargetParameters.KinesisStreamParameters != nil { + f10f6 := &svcsdk.PipeTargetKinesisStreamParameters{} + if r.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey != nil { + f10f6.SetPartitionKey(*r.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey) + } + f10.SetKinesisStreamParameters(f10f6) + } + if r.ko.Spec.TargetParameters.LambdaFunctionParameters != nil { + f10f7 := &svcsdk.PipeTargetLambdaFunctionParameters{} + if r.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType != nil { + f10f7.SetInvocationType(*r.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType) + } + f10.SetLambdaFunctionParameters(f10f7) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters != nil { + f10f8 := &svcsdk.PipeTargetRedshiftDataParameters{} + if r.ko.Spec.TargetParameters.RedshiftDataParameters.Database != nil { + f10f8.SetDatabase(*r.ko.Spec.TargetParameters.RedshiftDataParameters.Database) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser != nil { + f10f8.SetDbUser(*r.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN != nil { + f10f8.SetSecretManagerArn(*r.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs != nil { + f10f8f3 := []*string{} + for _, f10f8f3iter := range r.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs { + var f10f8f3elem string + f10f8f3elem = *f10f8f3iter + f10f8f3 = append(f10f8f3, &f10f8f3elem) + } + f10f8.SetSqls(f10f8f3) + } + if 
r.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName != nil { + f10f8.SetStatementName(*r.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent != nil { + f10f8.SetWithEvent(*r.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent) + } + f10.SetRedshiftDataParameters(f10f8) + } + if r.ko.Spec.TargetParameters.SageMakerPipelineParameters != nil { + f10f9 := &svcsdk.PipeTargetSageMakerPipelineParameters{} + if r.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList != nil { + f10f9f0 := []*svcsdk.SageMakerPipelineParameter{} + for _, f10f9f0iter := range r.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList { + f10f9f0elem := &svcsdk.SageMakerPipelineParameter{} + if f10f9f0iter.Name != nil { + f10f9f0elem.SetName(*f10f9f0iter.Name) + } + if f10f9f0iter.Value != nil { + f10f9f0elem.SetValue(*f10f9f0iter.Value) + } + f10f9f0 = append(f10f9f0, f10f9f0elem) + } + f10f9.SetPipelineParameterList(f10f9f0) + } + f10.SetSageMakerPipelineParameters(f10f9) + } + if r.ko.Spec.TargetParameters.SQSQueueParameters != nil { + f10f10 := &svcsdk.PipeTargetSqsQueueParameters{} + if r.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID != nil { + f10f10.SetMessageDeduplicationId(*r.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID) + } + if r.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID != nil { + f10f10.SetMessageGroupId(*r.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID) + } + f10.SetSqsQueueParameters(f10f10) + } + if r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters != nil { + f10f11 := &svcsdk.PipeTargetStateMachineParameters{} + if r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType != nil { + f10f11.SetInvocationType(*r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType) + } + f10.SetStepFunctionStateMachineParameters(f10f11) + 
} + res.SetTargetParameters(f10) + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with updated fields. +func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkUpdate") + defer func() { + exit(err) + }() + if pipeInMutatingState(latest) { + return latest, requeueWaitWhileUpdating + } + + // hack (continued from delta.go): if there is only a difference in the current + // and desired state (expressed through non-existing Spec field CurrentState, + // continuously requeue so we don't block changes to the resources to recover + // from a FAILED state + if !delta.DifferentExcept("Spec.CurrentState") { + return latest, requeueWaitWhileUpdating + } + + if delta.DifferentAt("Spec.Tags") { + err = rm.updatePipeTags(ctx, latest, desired) + if err != nil { + return nil, err + } + } + + // If no other differences were observe, avoid making UpdatePipe API calls. 
+ if !delta.DifferentExcept("Spec.Tags") { + return desired, nil + } + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) + if err != nil { + return nil, err + } + // we need to explicitly unset nil spec values + unsetRemovedSpecFields(delta, desired.ko.Spec, input) + + var resp *svcsdk.UpdatePipeOutput + _ = resp + resp, err = rm.sdkapi.UpdatePipeWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "UpdatePipe", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + // always requeue with desired state and return immediately due to eventually + // consistent API + return desired, ackrequeue.NeededAfter(nil, defaultRequeueDelay) + + // TODO (@embano1): we can't tell code-gen to not generate the rest of the code + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CreationTime != nil { + ko.Status.CreationTime = &metav1.Time{*resp.CreationTime} + } else { + ko.Status.CreationTime = nil + } + if resp.CurrentState != nil { + ko.Status.CurrentState = resp.CurrentState + } else { + ko.Status.CurrentState = nil + } + if resp.DesiredState != nil { + ko.Spec.DesiredState = resp.DesiredState + } else { + ko.Spec.DesiredState = nil + } + if resp.LastModifiedTime != nil { + ko.Status.LastModifiedTime = &metav1.Time{*resp.LastModifiedTime} + } else { + ko.Status.LastModifiedTime = nil + } + if resp.Name != nil { + ko.Spec.Name = resp.Name + } else { + ko.Spec.Name = nil + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// newUpdateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Update API call for the resource +func (rm *resourceManager) 
newUpdateRequestPayload( + ctx context.Context, + r *resource, + delta *ackcompare.Delta, +) (*svcsdk.UpdatePipeInput, error) { + res := &svcsdk.UpdatePipeInput{} + + if r.ko.Spec.Description != nil { + res.SetDescription(*r.ko.Spec.Description) + } + if r.ko.Spec.DesiredState != nil { + res.SetDesiredState(*r.ko.Spec.DesiredState) + } + if r.ko.Spec.Enrichment != nil { + res.SetEnrichment(*r.ko.Spec.Enrichment) + } + if r.ko.Spec.EnrichmentParameters != nil { + f3 := &svcsdk.PipeEnrichmentParameters{} + if r.ko.Spec.EnrichmentParameters.HTTPParameters != nil { + f3f0 := &svcsdk.PipeEnrichmentHttpParameters{} + if r.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters != nil { + f3f0f0 := map[string]*string{} + for f3f0f0key, f3f0f0valiter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.HeaderParameters { + var f3f0f0val string + f3f0f0val = *f3f0f0valiter + f3f0f0[f3f0f0key] = &f3f0f0val + } + f3f0.SetHeaderParameters(f3f0f0) + } + if r.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues != nil { + f3f0f1 := []*string{} + for _, f3f0f1iter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.PathParameterValues { + var f3f0f1elem string + f3f0f1elem = *f3f0f1iter + f3f0f1 = append(f3f0f1, &f3f0f1elem) + } + f3f0.SetPathParameterValues(f3f0f1) + } + if r.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters != nil { + f3f0f2 := map[string]*string{} + for f3f0f2key, f3f0f2valiter := range r.ko.Spec.EnrichmentParameters.HTTPParameters.QueryStringParameters { + var f3f0f2val string + f3f0f2val = *f3f0f2valiter + f3f0f2[f3f0f2key] = &f3f0f2val + } + f3f0.SetQueryStringParameters(f3f0f2) + } + f3.SetHttpParameters(f3f0) + } + if r.ko.Spec.EnrichmentParameters.InputTemplate != nil { + f3.SetInputTemplate(*r.ko.Spec.EnrichmentParameters.InputTemplate) + } + res.SetEnrichmentParameters(f3) + } + if r.ko.Spec.Name != nil { + res.SetName(*r.ko.Spec.Name) + } + if r.ko.Spec.RoleARN != nil { + res.SetRoleArn(*r.ko.Spec.RoleARN) + } + if 
r.ko.Spec.SourceParameters != nil { + f6 := &svcsdk.UpdatePipeSourceParameters{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters != nil { + f6f0 := &svcsdk.UpdatePipeSourceActiveMQBrokerParameters{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize != nil { + f6f0.SetBatchSize(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials != nil { + f6f0f1 := &svcsdk.MQBrokerAccessCredentials{} + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth != nil { + f6f0f1.SetBasicAuth(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.Credentials.BasicAuth) + } + f6f0.SetCredentials(f6f0f1) + } + if r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f6f0.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.ActiveMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + f6.SetActiveMQBrokerParameters(f6f0) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters != nil { + f6f1 := &svcsdk.UpdatePipeSourceDynamoDBStreamParameters{} + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize != nil { + f6f1.SetBatchSize(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig != nil { + f6f1f1 := &svcsdk.DeadLetterConfig{} + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN != nil { + f6f1f1.SetArn(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.DeadLetterConfig.ARN) + } + f6f1.SetDeadLetterConfig(f6f1f1) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds != nil { + f6f1.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds != nil { + 
f6f1.SetMaximumRecordAgeInSeconds(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRecordAgeInSeconds) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts != nil { + f6f1.SetMaximumRetryAttempts(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.MaximumRetryAttempts) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure != nil { + f6f1.SetOnPartialBatchItemFailure(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.OnPartialBatchItemFailure) + } + if r.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor != nil { + f6f1.SetParallelizationFactor(*r.ko.Spec.SourceParameters.DynamoDBStreamParameters.ParallelizationFactor) + } + f6.SetDynamoDBStreamParameters(f6f1) + } + if r.ko.Spec.SourceParameters.FilterCriteria != nil { + f6f2 := &svcsdk.FilterCriteria{} + if r.ko.Spec.SourceParameters.FilterCriteria.Filters != nil { + f6f2f0 := []*svcsdk.Filter{} + for _, f6f2f0iter := range r.ko.Spec.SourceParameters.FilterCriteria.Filters { + f6f2f0elem := &svcsdk.Filter{} + if f6f2f0iter.Pattern != nil { + f6f2f0elem.SetPattern(*f6f2f0iter.Pattern) + } + f6f2f0 = append(f6f2f0, f6f2f0elem) + } + f6f2.SetFilters(f6f2f0) + } + f6.SetFilterCriteria(f6f2) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters != nil { + f6f3 := &svcsdk.UpdatePipeSourceKinesisStreamParameters{} + if r.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize != nil { + f6f3.SetBatchSize(*r.ko.Spec.SourceParameters.KinesisStreamParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig != nil { + f6f3f1 := &svcsdk.DeadLetterConfig{} + if r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN != nil { + f6f3f1.SetArn(*r.ko.Spec.SourceParameters.KinesisStreamParameters.DeadLetterConfig.ARN) + } + f6f3.SetDeadLetterConfig(f6f3f1) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds != nil { + 
f6f3.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds != nil { + f6f3.SetMaximumRecordAgeInSeconds(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRecordAgeInSeconds) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts != nil { + f6f3.SetMaximumRetryAttempts(*r.ko.Spec.SourceParameters.KinesisStreamParameters.MaximumRetryAttempts) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure != nil { + f6f3.SetOnPartialBatchItemFailure(*r.ko.Spec.SourceParameters.KinesisStreamParameters.OnPartialBatchItemFailure) + } + if r.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor != nil { + f6f3.SetParallelizationFactor(*r.ko.Spec.SourceParameters.KinesisStreamParameters.ParallelizationFactor) + } + f6.SetKinesisStreamParameters(f6f3) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters != nil { + f6f4 := &svcsdk.UpdatePipeSourceManagedStreamingKafkaParameters{} + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize != nil { + f6f4.SetBatchSize(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials != nil { + f6f4f1 := &svcsdk.MSKAccessCredentials{} + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + f6f4f1.SetClientCertificateTlsAuth(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + if r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + f6f4f1.SetSaslScram512Auth(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.Credentials.SASLSCRAM512Auth) + } + f6f4.SetCredentials(f6f4f1) + } + if 
r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f6f4.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.ManagedStreamingKafkaParameters.MaximumBatchingWindowInSeconds) + } + f6.SetManagedStreamingKafkaParameters(f6f4) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters != nil { + f6f5 := &svcsdk.UpdatePipeSourceRabbitMQBrokerParameters{} + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize != nil { + f6f5.SetBatchSize(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials != nil { + f6f5f1 := &svcsdk.MQBrokerAccessCredentials{} + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth != nil { + f6f5f1.SetBasicAuth(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.Credentials.BasicAuth) + } + f6f5.SetCredentials(f6f5f1) + } + if r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds != nil { + f6f5.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.RabbitMQBrokerParameters.MaximumBatchingWindowInSeconds) + } + f6.SetRabbitMQBrokerParameters(f6f5) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters != nil { + f6f6 := &svcsdk.UpdatePipeSourceSelfManagedKafkaParameters{} + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize != nil { + f6f6.SetBatchSize(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials != nil { + f6f6f1 := &svcsdk.SelfManagedKafkaAccessConfigurationCredentials{} + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth != nil { + f6f6f1.SetBasicAuth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.BasicAuth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth != nil { + 
f6f6f1.SetClientCertificateTlsAuth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.ClientCertificateTLSAuth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth != nil { + f6f6f1.SetSaslScram256Auth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM256Auth) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth != nil { + f6f6f1.SetSaslScram512Auth(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.Credentials.SASLSCRAM512Auth) + } + f6f6.SetCredentials(f6f6f1) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds != nil { + f6f6.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.MaximumBatchingWindowInSeconds) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate != nil { + f6f6.SetServerRootCaCertificate(*r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.ServerRootCaCertificate) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC != nil { + f6f6f4 := &svcsdk.SelfManagedKafkaAccessConfigurationVpc{} + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup != nil { + f6f6f4f0 := []*string{} + for _, f6f6f4f0iter := range r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.SecurityGroup { + var f6f6f4f0elem string + f6f6f4f0elem = *f6f6f4f0iter + f6f6f4f0 = append(f6f6f4f0, &f6f6f4f0elem) + } + f6f6f4.SetSecurityGroup(f6f6f4f0) + } + if r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets != nil { + f6f6f4f1 := []*string{} + for _, f6f6f4f1iter := range r.ko.Spec.SourceParameters.SelfManagedKafkaParameters.VPC.Subnets { + var f6f6f4f1elem string + f6f6f4f1elem = *f6f6f4f1iter + f6f6f4f1 = append(f6f6f4f1, &f6f6f4f1elem) + } + f6f6f4.SetSubnets(f6f6f4f1) + } + f6f6.SetVpc(f6f6f4) + } + f6.SetSelfManagedKafkaParameters(f6f6) + } + if r.ko.Spec.SourceParameters.SQSQueueParameters 
!= nil { + f6f7 := &svcsdk.UpdatePipeSourceSqsQueueParameters{} + if r.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize != nil { + f6f7.SetBatchSize(*r.ko.Spec.SourceParameters.SQSQueueParameters.BatchSize) + } + if r.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds != nil { + f6f7.SetMaximumBatchingWindowInSeconds(*r.ko.Spec.SourceParameters.SQSQueueParameters.MaximumBatchingWindowInSeconds) + } + f6.SetSqsQueueParameters(f6f7) + } + res.SetSourceParameters(f6) + } + if r.ko.Spec.Target != nil { + res.SetTarget(*r.ko.Spec.Target) + } + if r.ko.Spec.TargetParameters != nil { + f8 := &svcsdk.PipeTargetParameters{} + if r.ko.Spec.TargetParameters.BatchJobParameters != nil { + f8f0 := &svcsdk.PipeTargetBatchJobParameters{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties != nil { + f8f0f0 := &svcsdk.BatchArrayProperties{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size != nil { + f8f0f0.SetSize(*r.ko.Spec.TargetParameters.BatchJobParameters.ArrayProperties.Size) + } + f8f0.SetArrayProperties(f8f0f0) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides != nil { + f8f0f1 := &svcsdk.BatchContainerOverrides{} + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command != nil { + f8f0f1f0 := []*string{} + for _, f8f0f1f0iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Command { + var f8f0f1f0elem string + f8f0f1f0elem = *f8f0f1f0iter + f8f0f1f0 = append(f8f0f1f0, &f8f0f1f0elem) + } + f8f0f1.SetCommand(f8f0f1f0) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment != nil { + f8f0f1f1 := []*svcsdk.BatchEnvironmentVariable{} + for _, f8f0f1f1iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.Environment { + f8f0f1f1elem := &svcsdk.BatchEnvironmentVariable{} + if f8f0f1f1iter.Name != nil { + f8f0f1f1elem.SetName(*f8f0f1f1iter.Name) + } + if f8f0f1f1iter.Value != nil { + 
f8f0f1f1elem.SetValue(*f8f0f1f1iter.Value) + } + f8f0f1f1 = append(f8f0f1f1, f8f0f1f1elem) + } + f8f0f1.SetEnvironment(f8f0f1f1) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType != nil { + f8f0f1.SetInstanceType(*r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.InstanceType) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements != nil { + f8f0f1f3 := []*svcsdk.BatchResourceRequirement{} + for _, f8f0f1f3iter := range r.ko.Spec.TargetParameters.BatchJobParameters.ContainerOverrides.ResourceRequirements { + f8f0f1f3elem := &svcsdk.BatchResourceRequirement{} + if f8f0f1f3iter.Type != nil { + f8f0f1f3elem.SetType(*f8f0f1f3iter.Type) + } + if f8f0f1f3iter.Value != nil { + f8f0f1f3elem.SetValue(*f8f0f1f3iter.Value) + } + f8f0f1f3 = append(f8f0f1f3, f8f0f1f3elem) + } + f8f0f1.SetResourceRequirements(f8f0f1f3) + } + f8f0.SetContainerOverrides(f8f0f1) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.DependsOn != nil { + f8f0f2 := []*svcsdk.BatchJobDependency{} + for _, f8f0f2iter := range r.ko.Spec.TargetParameters.BatchJobParameters.DependsOn { + f8f0f2elem := &svcsdk.BatchJobDependency{} + if f8f0f2iter.JobID != nil { + f8f0f2elem.SetJobId(*f8f0f2iter.JobID) + } + if f8f0f2iter.Type != nil { + f8f0f2elem.SetType(*f8f0f2iter.Type) + } + f8f0f2 = append(f8f0f2, f8f0f2elem) + } + f8f0.SetDependsOn(f8f0f2) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition != nil { + f8f0.SetJobDefinition(*r.ko.Spec.TargetParameters.BatchJobParameters.JobDefinition) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.JobName != nil { + f8f0.SetJobName(*r.ko.Spec.TargetParameters.BatchJobParameters.JobName) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.Parameters != nil { + f8f0f5 := map[string]*string{} + for f8f0f5key, f8f0f5valiter := range r.ko.Spec.TargetParameters.BatchJobParameters.Parameters { + var f8f0f5val string + f8f0f5val = *f8f0f5valiter + 
f8f0f5[f8f0f5key] = &f8f0f5val + } + f8f0.SetParameters(f8f0f5) + } + if r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy != nil { + f8f0f6 := &svcsdk.BatchRetryStrategy{} + if r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts != nil { + f8f0f6.SetAttempts(*r.ko.Spec.TargetParameters.BatchJobParameters.RetryStrategy.Attempts) + } + f8f0.SetRetryStrategy(f8f0f6) + } + f8.SetBatchJobParameters(f8f0) + } + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters != nil { + f8f1 := &svcsdk.PipeTargetCloudWatchLogsParameters{} + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName != nil { + f8f1.SetLogStreamName(*r.ko.Spec.TargetParameters.CloudWatchLogsParameters.LogStreamName) + } + if r.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp != nil { + f8f1.SetTimestamp(*r.ko.Spec.TargetParameters.CloudWatchLogsParameters.Timestamp) + } + f8.SetCloudWatchLogsParameters(f8f1) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters != nil { + f8f2 := &svcsdk.PipeTargetEcsTaskParameters{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy != nil { + f8f2f0 := []*svcsdk.CapacityProviderStrategyItem{} + for _, f8f2f0iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.CapacityProviderStrategy { + f8f2f0elem := &svcsdk.CapacityProviderStrategyItem{} + if f8f2f0iter.Base != nil { + f8f2f0elem.SetBase(*f8f2f0iter.Base) + } + if f8f2f0iter.CapacityProvider != nil { + f8f2f0elem.SetCapacityProvider(*f8f2f0iter.CapacityProvider) + } + if f8f2f0iter.Weight != nil { + f8f2f0elem.SetWeight(*f8f2f0iter.Weight) + } + f8f2f0 = append(f8f2f0, f8f2f0elem) + } + f8f2.SetCapacityProviderStrategy(f8f2f0) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags != nil { + f8f2.SetEnableECSManagedTags(*r.ko.Spec.TargetParameters.ECSTaskParameters.EnableECSManagedTags) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand != nil { + 
f8f2.SetEnableExecuteCommand(*r.ko.Spec.TargetParameters.ECSTaskParameters.EnableExecuteCommand) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Group != nil { + f8f2.SetGroup(*r.ko.Spec.TargetParameters.ECSTaskParameters.Group) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType != nil { + f8f2.SetLaunchType(*r.ko.Spec.TargetParameters.ECSTaskParameters.LaunchType) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration != nil { + f8f2f5 := &svcsdk.NetworkConfiguration{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration != nil { + f8f2f5f0 := &svcsdk.AwsVpcConfiguration{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP != nil { + f8f2f5f0.SetAssignPublicIp(*r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.AssignPublicIP) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups != nil { + f8f2f5f0f1 := []*string{} + for _, f8f2f5f0f1iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.SecurityGroups { + var f8f2f5f0f1elem string + f8f2f5f0f1elem = *f8f2f5f0f1iter + f8f2f5f0f1 = append(f8f2f5f0f1, &f8f2f5f0f1elem) + } + f8f2f5f0.SetSecurityGroups(f8f2f5f0f1) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets != nil { + f8f2f5f0f2 := []*string{} + for _, f8f2f5f0f2iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.NetworkConfiguration.AWSVPCConfiguration.Subnets { + var f8f2f5f0f2elem string + f8f2f5f0f2elem = *f8f2f5f0f2iter + f8f2f5f0f2 = append(f8f2f5f0f2, &f8f2f5f0f2elem) + } + f8f2f5f0.SetSubnets(f8f2f5f0f2) + } + f8f2f5.SetAwsvpcConfiguration(f8f2f5f0) + } + f8f2.SetNetworkConfiguration(f8f2f5) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides != nil { + f8f2f6 := &svcsdk.EcsTaskOverride{} + if 
r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides != nil { + f8f2f6f0 := []*svcsdk.EcsContainerOverride{} + for _, f8f2f6f0iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ContainerOverrides { + f8f2f6f0elem := &svcsdk.EcsContainerOverride{} + if f8f2f6f0iter.Command != nil { + f8f2f6f0elemf0 := []*string{} + for _, f8f2f6f0elemf0iter := range f8f2f6f0iter.Command { + var f8f2f6f0elemf0elem string + f8f2f6f0elemf0elem = *f8f2f6f0elemf0iter + f8f2f6f0elemf0 = append(f8f2f6f0elemf0, &f8f2f6f0elemf0elem) + } + f8f2f6f0elem.SetCommand(f8f2f6f0elemf0) + } + if f8f2f6f0iter.CPU != nil { + f8f2f6f0elem.SetCpu(*f8f2f6f0iter.CPU) + } + if f8f2f6f0iter.Environment != nil { + f8f2f6f0elemf2 := []*svcsdk.EcsEnvironmentVariable{} + for _, f8f2f6f0elemf2iter := range f8f2f6f0iter.Environment { + f8f2f6f0elemf2elem := &svcsdk.EcsEnvironmentVariable{} + if f8f2f6f0elemf2iter.Name != nil { + f8f2f6f0elemf2elem.SetName(*f8f2f6f0elemf2iter.Name) + } + if f8f2f6f0elemf2iter.Value != nil { + f8f2f6f0elemf2elem.SetValue(*f8f2f6f0elemf2iter.Value) + } + f8f2f6f0elemf2 = append(f8f2f6f0elemf2, f8f2f6f0elemf2elem) + } + f8f2f6f0elem.SetEnvironment(f8f2f6f0elemf2) + } + if f8f2f6f0iter.EnvironmentFiles != nil { + f8f2f6f0elemf3 := []*svcsdk.EcsEnvironmentFile{} + for _, f8f2f6f0elemf3iter := range f8f2f6f0iter.EnvironmentFiles { + f8f2f6f0elemf3elem := &svcsdk.EcsEnvironmentFile{} + if f8f2f6f0elemf3iter.Type != nil { + f8f2f6f0elemf3elem.SetType(*f8f2f6f0elemf3iter.Type) + } + if f8f2f6f0elemf3iter.Value != nil { + f8f2f6f0elemf3elem.SetValue(*f8f2f6f0elemf3iter.Value) + } + f8f2f6f0elemf3 = append(f8f2f6f0elemf3, f8f2f6f0elemf3elem) + } + f8f2f6f0elem.SetEnvironmentFiles(f8f2f6f0elemf3) + } + if f8f2f6f0iter.Memory != nil { + f8f2f6f0elem.SetMemory(*f8f2f6f0iter.Memory) + } + if f8f2f6f0iter.MemoryReservation != nil { + f8f2f6f0elem.SetMemoryReservation(*f8f2f6f0iter.MemoryReservation) + } + if f8f2f6f0iter.Name != nil { + 
f8f2f6f0elem.SetName(*f8f2f6f0iter.Name) + } + if f8f2f6f0iter.ResourceRequirements != nil { + f8f2f6f0elemf7 := []*svcsdk.EcsResourceRequirement{} + for _, f8f2f6f0elemf7iter := range f8f2f6f0iter.ResourceRequirements { + f8f2f6f0elemf7elem := &svcsdk.EcsResourceRequirement{} + if f8f2f6f0elemf7iter.Type != nil { + f8f2f6f0elemf7elem.SetType(*f8f2f6f0elemf7iter.Type) + } + if f8f2f6f0elemf7iter.Value != nil { + f8f2f6f0elemf7elem.SetValue(*f8f2f6f0elemf7iter.Value) + } + f8f2f6f0elemf7 = append(f8f2f6f0elemf7, f8f2f6f0elemf7elem) + } + f8f2f6f0elem.SetResourceRequirements(f8f2f6f0elemf7) + } + f8f2f6f0 = append(f8f2f6f0, f8f2f6f0elem) + } + f8f2f6.SetContainerOverrides(f8f2f6f0) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU != nil { + f8f2f6.SetCpu(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.CPU) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage != nil { + f8f2f6f2 := &svcsdk.EcsEphemeralStorage{} + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB != nil { + f8f2f6f2.SetSizeInGiB(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.EphemeralStorage.SizeInGiB) + } + f8f2f6.SetEphemeralStorage(f8f2f6f2) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN != nil { + f8f2f6.SetExecutionRoleArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.ExecutionRoleARN) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides != nil { + f8f2f6f4 := []*svcsdk.EcsInferenceAcceleratorOverride{} + for _, f8f2f6f4iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.InferenceAcceleratorOverrides { + f8f2f6f4elem := &svcsdk.EcsInferenceAcceleratorOverride{} + if f8f2f6f4iter.DeviceName != nil { + f8f2f6f4elem.SetDeviceName(*f8f2f6f4iter.DeviceName) + } + if f8f2f6f4iter.DeviceType != nil { + f8f2f6f4elem.SetDeviceType(*f8f2f6f4iter.DeviceType) + } + f8f2f6f4 = append(f8f2f6f4, f8f2f6f4elem) + } + 
f8f2f6.SetInferenceAcceleratorOverrides(f8f2f6f4) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory != nil { + f8f2f6.SetMemory(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.Memory) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN != nil { + f8f2f6.SetTaskRoleArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.Overrides.TaskRoleARN) + } + f8f2.SetOverrides(f8f2f6) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints != nil { + f8f2f7 := []*svcsdk.PlacementConstraint{} + for _, f8f2f7iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementConstraints { + f8f2f7elem := &svcsdk.PlacementConstraint{} + if f8f2f7iter.Expression != nil { + f8f2f7elem.SetExpression(*f8f2f7iter.Expression) + } + if f8f2f7iter.Type != nil { + f8f2f7elem.SetType(*f8f2f7iter.Type) + } + f8f2f7 = append(f8f2f7, f8f2f7elem) + } + f8f2.SetPlacementConstraints(f8f2f7) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy != nil { + f8f2f8 := []*svcsdk.PlacementStrategy{} + for _, f8f2f8iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.PlacementStrategy { + f8f2f8elem := &svcsdk.PlacementStrategy{} + if f8f2f8iter.Field != nil { + f8f2f8elem.SetField(*f8f2f8iter.Field) + } + if f8f2f8iter.Type != nil { + f8f2f8elem.SetType(*f8f2f8iter.Type) + } + f8f2f8 = append(f8f2f8, f8f2f8elem) + } + f8f2.SetPlacementStrategy(f8f2f8) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion != nil { + f8f2.SetPlatformVersion(*r.ko.Spec.TargetParameters.ECSTaskParameters.PlatformVersion) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags != nil { + f8f2.SetPropagateTags(*r.ko.Spec.TargetParameters.ECSTaskParameters.PropagateTags) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID != nil { + f8f2.SetReferenceId(*r.ko.Spec.TargetParameters.ECSTaskParameters.ReferenceID) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.Tags != nil { + f8f2f12 := 
[]*svcsdk.Tag{} + for _, f8f2f12iter := range r.ko.Spec.TargetParameters.ECSTaskParameters.Tags { + f8f2f12elem := &svcsdk.Tag{} + if f8f2f12iter.Key != nil { + f8f2f12elem.SetKey(*f8f2f12iter.Key) + } + if f8f2f12iter.Value != nil { + f8f2f12elem.SetValue(*f8f2f12iter.Value) + } + f8f2f12 = append(f8f2f12, f8f2f12elem) + } + f8f2.SetTags(f8f2f12) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount != nil { + f8f2.SetTaskCount(*r.ko.Spec.TargetParameters.ECSTaskParameters.TaskCount) + } + if r.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN != nil { + f8f2.SetTaskDefinitionArn(*r.ko.Spec.TargetParameters.ECSTaskParameters.TaskDefinitionARN) + } + f8.SetEcsTaskParameters(f8f2) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters != nil { + f8f3 := &svcsdk.PipeTargetEventBridgeEventBusParameters{} + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType != nil { + f8f3.SetDetailType(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.DetailType) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID != nil { + f8f3.SetEndpointId(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.EndpointID) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources != nil { + f8f3f2 := []*string{} + for _, f8f3f2iter := range r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Resources { + var f8f3f2elem string + f8f3f2elem = *f8f3f2iter + f8f3f2 = append(f8f3f2, &f8f3f2elem) + } + f8f3.SetResources(f8f3f2) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source != nil { + f8f3.SetSource(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Source) + } + if r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time != nil { + f8f3.SetTime(*r.ko.Spec.TargetParameters.EventBridgeEventBusParameters.Time) + } + f8.SetEventBridgeEventBusParameters(f8f3) + } + if r.ko.Spec.TargetParameters.HTTPParameters != nil { + f8f4 := 
&svcsdk.PipeTargetHttpParameters{} + if r.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters != nil { + f8f4f0 := map[string]*string{} + for f8f4f0key, f8f4f0valiter := range r.ko.Spec.TargetParameters.HTTPParameters.HeaderParameters { + var f8f4f0val string + f8f4f0val = *f8f4f0valiter + f8f4f0[f8f4f0key] = &f8f4f0val + } + f8f4.SetHeaderParameters(f8f4f0) + } + if r.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues != nil { + f8f4f1 := []*string{} + for _, f8f4f1iter := range r.ko.Spec.TargetParameters.HTTPParameters.PathParameterValues { + var f8f4f1elem string + f8f4f1elem = *f8f4f1iter + f8f4f1 = append(f8f4f1, &f8f4f1elem) + } + f8f4.SetPathParameterValues(f8f4f1) + } + if r.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters != nil { + f8f4f2 := map[string]*string{} + for f8f4f2key, f8f4f2valiter := range r.ko.Spec.TargetParameters.HTTPParameters.QueryStringParameters { + var f8f4f2val string + f8f4f2val = *f8f4f2valiter + f8f4f2[f8f4f2key] = &f8f4f2val + } + f8f4.SetQueryStringParameters(f8f4f2) + } + f8.SetHttpParameters(f8f4) + } + if r.ko.Spec.TargetParameters.InputTemplate != nil { + f8.SetInputTemplate(*r.ko.Spec.TargetParameters.InputTemplate) + } + if r.ko.Spec.TargetParameters.KinesisStreamParameters != nil { + f8f6 := &svcsdk.PipeTargetKinesisStreamParameters{} + if r.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey != nil { + f8f6.SetPartitionKey(*r.ko.Spec.TargetParameters.KinesisStreamParameters.PartitionKey) + } + f8.SetKinesisStreamParameters(f8f6) + } + if r.ko.Spec.TargetParameters.LambdaFunctionParameters != nil { + f8f7 := &svcsdk.PipeTargetLambdaFunctionParameters{} + if r.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType != nil { + f8f7.SetInvocationType(*r.ko.Spec.TargetParameters.LambdaFunctionParameters.InvocationType) + } + f8.SetLambdaFunctionParameters(f8f7) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters != nil { + f8f8 := &svcsdk.PipeTargetRedshiftDataParameters{} + 
if r.ko.Spec.TargetParameters.RedshiftDataParameters.Database != nil { + f8f8.SetDatabase(*r.ko.Spec.TargetParameters.RedshiftDataParameters.Database) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser != nil { + f8f8.SetDbUser(*r.ko.Spec.TargetParameters.RedshiftDataParameters.DBUser) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN != nil { + f8f8.SetSecretManagerArn(*r.ko.Spec.TargetParameters.RedshiftDataParameters.SecretManagerARN) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs != nil { + f8f8f3 := []*string{} + for _, f8f8f3iter := range r.ko.Spec.TargetParameters.RedshiftDataParameters.SQLs { + var f8f8f3elem string + f8f8f3elem = *f8f8f3iter + f8f8f3 = append(f8f8f3, &f8f8f3elem) + } + f8f8.SetSqls(f8f8f3) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName != nil { + f8f8.SetStatementName(*r.ko.Spec.TargetParameters.RedshiftDataParameters.StatementName) + } + if r.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent != nil { + f8f8.SetWithEvent(*r.ko.Spec.TargetParameters.RedshiftDataParameters.WithEvent) + } + f8.SetRedshiftDataParameters(f8f8) + } + if r.ko.Spec.TargetParameters.SageMakerPipelineParameters != nil { + f8f9 := &svcsdk.PipeTargetSageMakerPipelineParameters{} + if r.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList != nil { + f8f9f0 := []*svcsdk.SageMakerPipelineParameter{} + for _, f8f9f0iter := range r.ko.Spec.TargetParameters.SageMakerPipelineParameters.PipelineParameterList { + f8f9f0elem := &svcsdk.SageMakerPipelineParameter{} + if f8f9f0iter.Name != nil { + f8f9f0elem.SetName(*f8f9f0iter.Name) + } + if f8f9f0iter.Value != nil { + f8f9f0elem.SetValue(*f8f9f0iter.Value) + } + f8f9f0 = append(f8f9f0, f8f9f0elem) + } + f8f9.SetPipelineParameterList(f8f9f0) + } + f8.SetSageMakerPipelineParameters(f8f9) + } + if r.ko.Spec.TargetParameters.SQSQueueParameters != nil { + f8f10 := &svcsdk.PipeTargetSqsQueueParameters{} + if 
r.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID != nil { + f8f10.SetMessageDeduplicationId(*r.ko.Spec.TargetParameters.SQSQueueParameters.MessageDeduplicationID) + } + if r.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID != nil { + f8f10.SetMessageGroupId(*r.ko.Spec.TargetParameters.SQSQueueParameters.MessageGroupID) + } + f8.SetSqsQueueParameters(f8f10) + } + if r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters != nil { + f8f11 := &svcsdk.PipeTargetStateMachineParameters{} + if r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType != nil { + f8f11.SetInvocationType(*r.ko.Spec.TargetParameters.StepFunctionStateMachineParameters.InvocationType) + } + f8.SetStepFunctionStateMachineParameters(f8f11) + } + res.SetTargetParameters(f8) + } + + return res, nil +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeletePipeOutput + _ = resp + resp, err = rm.sdkapi.DeletePipeWithContext(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeletePipe", err) + // always requeue if API call succeeded due to eventually consistent state + // transitions + if err == nil { + return r, requeueWaitWhileDeleting + } + + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeletePipeInput, error) { + res := &svcsdk.DeletePipeInput{} + + if r.ko.Spec.Name != nil { + res.SetName(*r.ko.Spec.Name) + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource 
+func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.Pipe, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue 
+ terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + return nil, false // not updated +} + +// terminalAWSError returns awserr, true; if the supplied error is an aws Error type +// and if the exception indicates that it is a Terminal exception +// 'Terminal' exception are specified in generator configuration +func (rm *resourceManager) terminalAWSError(err error) bool { + if err == nil { + return false + } + awsErr, ok := ackerr.AWSError(err) + if !ok { + return false + } + switch awsErr.Code() { + case "ValidationException": + return true + default: + return false + } +} diff --git a/pkg/resource/pipe/tags.go b/pkg/resource/pipe/tags.go new file mode 100644 index 0000000..5a746e9 --- /dev/null +++ b/pkg/resource/pipe/tags.go @@ -0,0 +1,59 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package pipe + +import ( + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/pipes-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.Pipe{} + _ = acktags.NewTags() +) + +// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. +func ToACKTags(tags map[string]*string) acktags.Tags { + result := acktags.NewTags() + if tags == nil || len(tags) == 0 { + return result + } + + for k, v := range tags { + if v == nil { + result[k] = "" + } else { + result[k] = *v + } + } + + return result +} + +// FromACKTags converts the tags parameter into map[string]*string shape. +// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. 
+func FromACKTags(tags acktags.Tags) map[string]*string { + result := map[string]*string{} + for k, v := range tags { + vCopy := v + result[k] = &vCopy + } + return result +} diff --git a/templates/hooks/pipe/sdk_create_post_set_output.go.tpl b/templates/hooks/pipe/sdk_create_post_set_output.go.tpl new file mode 100644 index 0000000..00f4a0d --- /dev/null +++ b/templates/hooks/pipe/sdk_create_post_set_output.go.tpl @@ -0,0 +1,3 @@ +if !pipeAvailable(&resource{ko}) { + return &resource{ko}, requeueWaitWhileCreating +} \ No newline at end of file diff --git a/templates/hooks/pipe/sdk_delete_post_request.go.tpl b/templates/hooks/pipe/sdk_delete_post_request.go.tpl new file mode 100644 index 0000000..54a7980 --- /dev/null +++ b/templates/hooks/pipe/sdk_delete_post_request.go.tpl @@ -0,0 +1,5 @@ +// always requeue if API call succeeded due to eventually consistent state +// transitions +if err == nil { + return r, requeueWaitWhileDeleting +} diff --git a/templates/hooks/pipe/sdk_update_post_build_request.go.tpl b/templates/hooks/pipe/sdk_update_post_build_request.go.tpl new file mode 100644 index 0000000..9105794 --- /dev/null +++ b/templates/hooks/pipe/sdk_update_post_build_request.go.tpl @@ -0,0 +1,2 @@ +// we need to explicitly unset nil spec values +unsetRemovedSpecFields(delta, desired.ko.Spec, input) diff --git a/templates/hooks/pipe/sdk_update_pre_build_request.go.tpl b/templates/hooks/pipe/sdk_update_pre_build_request.go.tpl new file mode 100644 index 0000000..bd44412 --- /dev/null +++ b/templates/hooks/pipe/sdk_update_pre_build_request.go.tpl @@ -0,0 +1,23 @@ +if pipeInMutatingState(latest) { + return latest, requeueWaitWhileUpdating +} + +// hack (continued from delta.go): if there is only a difference in the current +// and desired state (expressed through non-existing Spec field CurrentState, +// continuously requeue so we don't block changes to the resources to recover +// from a FAILED state +if !delta.DifferentExcept("Spec.CurrentState") { + return 
// If no other differences were observed, avoid making UpdatePipe API calls.
pipes.services.k8s.aws/v1alpha1 +kind: Pipe +metadata: + name: $PIPE_NAME +spec: + name: $PIPE_NAME + description: testing pipe created ACK + roleARN: $PIPE_ROLE_ARN + source: $SOURCE_ARN + target: $TARGET_ARN + sourceParameters: + sqsQueueParameters: + batchSize: 10 + tags: + env: testing \ No newline at end of file diff --git a/test/e2e/service_bootstrap.py b/test/e2e/service_bootstrap.py index 91e5b96..71885af 100644 --- a/test/e2e/service_bootstrap.py +++ b/test/e2e/service_bootstrap.py @@ -15,6 +15,8 @@ import logging from acktest.bootstrapping import Resources, BootstrapFailureException +from acktest.bootstrapping.iam import Role +from acktest.bootstrapping.sqs import Queue from e2e import bootstrap_directory from e2e.bootstrap_resources import BootstrapResources @@ -23,7 +25,17 @@ def service_bootstrap() -> Resources: logging.getLogger().setLevel(logging.INFO) resources = BootstrapResources( - # TODO: Add bootstrapping when you have defined the resources + PipeRole=Role( + "ack-test-pipe-role", + "pipes.amazonaws.com", + managed_policies=["arn:aws:iam::aws:policy/AmazonSQSFullAccess"], + ), + SourceQueue=Queue( + "ack-pipes-controller-source-queue" + ), + TargetQueue=Queue( + "ack-pipes-controller-target-queue" + ), ) try: diff --git a/test/e2e/tests/helper.py b/test/e2e/tests/helper.py new file mode 100644 index 0000000..f05fdfb --- /dev/null +++ b/test/e2e/tests/helper.py @@ -0,0 +1,41 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Helper functions for EventBridgePipes e2e tests +""" + +import logging + +class PipesValidator: + def __init__(self, pipes_client): + self.pipes_client = pipes_client + + def get_pipe(self, pipe_name: str) -> dict: + try: + resp = self.pipes_client.describe_pipe( + Name=pipe_name + ) + return resp + + except Exception as e: + logging.debug(e) + return None + + def pipe_exists(self, pipe_name) -> bool: + return self.get_pipe(pipe_name) is not None + + def get_resource_tags(self, resource_arn: str): + resource_tags = self.pipes_client.list_tags_for_resource( + resourceArn=resource_arn, + ) + return resource_tags['tags'] \ No newline at end of file diff --git a/test/e2e/tests/test_pipe.py b/test/e2e/tests/test_pipe.py new file mode 100644 index 0000000..90eac57 --- /dev/null +++ b/test/e2e/tests/test_pipe.py @@ -0,0 +1,179 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Integration tests for the EventBridgePipes Pipe resource +""" + +import logging +import time +from typing import Dict + +import pytest + +from acktest import tags +from acktest.k8s import resource as k8s +from acktest.resources import random_suffix_name +from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_pipes_resource +from e2e.replacement_values import REPLACEMENT_VALUES +from e2e.bootstrap_resources import get_bootstrap_resources +from e2e.tests.helper import PipesValidator + +RESOURCE_PLURAL = "pipes" + +CREATE_WAIT_AFTER_SECONDS = 45 +UPDATE_WAIT_AFTER_SECONDS = 75 +DELETE_WAIT_AFTER_SECONDS = 60 + +@pytest.fixture(scope="module") +def simple_pipe(): + resource_name = random_suffix_name("ack-test-pipe", 24) + + resources = get_bootstrap_resources() + replacements = REPLACEMENT_VALUES.copy() + replacements["PIPE_NAME"] = resource_name + replacements["PIPE_ROLE_ARN"] = resources.PipeRole.arn + replacements["SOURCE_ARN"] = resources.SourceQueue.arn + replacements["TARGET_ARN"] = resources.TargetQueue.arn + + resource_data = load_pipes_resource( + "pipe", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + resource_name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + + time.sleep(CREATE_WAIT_AFTER_SECONDS) + + # Get latest pipe CR + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + yield (ref, cr) + + # Try to delete, if doesn't already exist + try: + _, deleted = k8s.delete_custom_resource(ref, 15, 15) + assert deleted + except: + pass + +@service_marker +@pytest.mark.canary +class TestPipe: + def test_create_delete_with_tags(self, pipes_client, simple_pipe): + (ref, cr) = simple_pipe + + pipe_name = cr["spec"]["name"] + pipe_arn = cr["status"]["ackResourceMetadata"]["arn"] + + pipes_validator = 
PipesValidator(pipes_client) + # verify that pipe exists + assert pipes_validator.pipe_exists(pipe_name) + + # verify that pipe tags are created + pipe_tags = pipes_validator.get_resource_tags(pipe_arn) + tags.assert_ack_system_tags( + tags=pipe_tags, + ) + tags.assert_equal_without_ack_tags( + expected=cr["spec"]["tags"], + actual=pipe_tags, + ) + + # Delete k8s resource + _, deleted = k8s.delete_custom_resource(ref, 15, 15) + assert deleted is True + + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + # Check pipe doesn't exist + assert not pipes_validator.pipe_exists(pipe_name) + + def test_simple_update(self, pipes_client, simple_pipe): + (ref, cr) = simple_pipe + + pipe_name = cr["spec"]["name"] + pipe_arn = cr["status"]["ackResourceMetadata"]["arn"] + + pipes_validator = PipesValidator(pipes_client) + # verify that pipe exists + assert pipes_validator.pipe_exists(pipe_name) + + + # New spec fields + cr["spec"]["tags"] = { + "env": "prod", + } + cr["spec"]["description"] = "testing pipe created ACK - updated" + + # Patch k8s resource + k8s.patch_custom_resource(ref, cr) + time.sleep(UPDATE_WAIT_AFTER_SECONDS) + + # verify that pipe description and tags are updated + pipe = pipes_validator.get_pipe(pipe_name) + assert pipe["Description"] == "testing pipe created ACK - updated" + + pipe_tags = pipes_validator.get_resource_tags(pipe_arn) + tags.assert_ack_system_tags( + tags=pipe_tags, + ) + tags.assert_equal_without_ack_tags( + expected=cr["spec"]["tags"], + actual=pipe_tags, + ) + + # Delete k8s resource + _, deleted = k8s.delete_custom_resource(ref, 15, 15) + assert deleted is True + + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + # Check pipe doesn't exist + assert not pipes_validator.pipe_exists(pipe_name) + + def test_pipe_update_state(self, pipes_client, simple_pipe): + (ref, cr) = simple_pipe + + pipe_name = cr["spec"]["name"] + + pipes_validator = PipesValidator(pipes_client) + # verify that pipe exists + assert pipes_validator.pipe_exists(pipe_name) + + 
cr["spec"]["desiredState"] = "STOPPED" + + # Patch k8s resource + k8s.patch_custom_resource(ref, cr) + time.sleep(UPDATE_WAIT_AFTER_SECONDS) + + pipe = pipes_validator.get_pipe(pipe_name) + assert pipe["DesiredState"] == "STOPPED" + assert pipe["CurrentState"] == "STOPPED" + + # Delete k8s resource + _, deleted = k8s.delete_custom_resource(ref, 15, 15) + assert deleted is True + + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + # Check pipe doesn't exist + assert not pipes_validator.pipe_exists(pipe_name) \ No newline at end of file