diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 810cff13..ee63afdc 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2024-05-02T20:54:33Z" + build_date: "2024-06-28T14:33:25Z" build_hash: 14cef51778d471698018b6c38b604181a6948248 - go_version: go1.22.0 + go_version: go1.22.2 version: v0.34.0 -api_directory_checksum: bd34f72147706f1dbc990acf4a6c4f6615c1bddb +api_directory_checksum: 73afd1cf92f1261c45bbb52544adf4da5c6a7cd0 api_version: v1alpha1 -aws_sdk_go_version: v1.44.93 +aws_sdk_go_version: v1.49.0 generator_config_info: - file_checksum: 3f88502d4b7623890c8eff789285f98fa3d84553 + file_checksum: 3c359b3f45716af86c99ab2ea0f2ab50eeae5dc9 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/cache_cluster.go b/apis/v1alpha1/cache_cluster.go new file mode 100644 index 00000000..aa6d48e2 --- /dev/null +++ b/apis/v1alpha1/cache_cluster.go @@ -0,0 +1,387 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CacheClusterSpec defines the desired state of CacheCluster. +// +// Contains all of the attributes of a specific cluster. 
+type CacheClusterSpec struct { + + // Specifies whether the nodes in this Memcached cluster are created in a single + // Availability Zone or created across multiple Availability Zones in the cluster's + // region. + // + // This parameter is only supported for Memcached clusters. + // + // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + // assumes single-az mode. + AZMode *string `json:"azMode,omitempty"` + // Reserved parameter. The password used to access a password protected server. + // + // Password constraints: + // + // - Must be only printable ASCII characters. + // + // - Must be at least 16 characters and no more than 128 characters in length. + // + // - The only permitted printable special characters are !, &, #, $, ^, <, + // >, and -. Other printable special characters cannot be used in the AUTH + // token. + // + // For more information, see AUTH password (http://redis.io/commands/AUTH) at + // http://redis.io/commands/AUTH. + AuthToken *ackv1alpha1.SecretKeyReference `json:"authToken,omitempty"` + // If you are running Redis engine version 6.0 or later, set this parameter + // to yes if you want to opt-in to the next auto minor version upgrade campaign. + // This parameter is disabled for previous versions. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` + // The node group (shard) identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // - A name must contain from 1 to 50 alphanumeric characters or hyphens. + // + // - The first character must be a letter. + // + // - A name cannot end with a hyphen or contain two consecutive hyphens. + // + // +kubebuilder:validation:Required + CacheClusterID *string `json:"cacheClusterID"` + // The compute and memory capacity of the nodes in the node group (shard). + // + // The following node types are supported by ElastiCache. 
Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. + // + // - General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis engine version 5.0.6 onward and + // for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis engine version 5.0.6 onward and + // Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // - Compute optimized: Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) 
C1 node types: cache.c1.xlarge + // + // - Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis engine version 5.0.6 onward and + // for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // - All current generation instance types are created in Amazon VPC by default. + // + // - Redis append-only files (AOF) are not supported for T1 or T2 instances. + // + // - Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // - Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `json:"cacheNodeType,omitempty"` + // The name of the parameter group to associate with this cluster. If this argument + // is omitted, the default parameter group for the specified engine is used. 
+ // You cannot use any parameter group which has cluster-enabled='yes' when creating + // a cluster. + CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + CacheParameterGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheParameterGroupRef,omitempty"` + // A list of security group names to associate with this cluster. + // + // Use this parameter only when you are creating a cluster outside of an Amazon + // Virtual Private Cloud (Amazon VPC). + CacheSecurityGroupNames []*string `json:"cacheSecurityGroupNames,omitempty"` + // The name of the subnet group to be used for the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + // + // If you're going to launch your cluster in an Amazon VPC, you need to create + // a subnet group before you start creating a cluster. For more information, + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` + // The name of the cache engine to be used for this cluster. + // + // Valid values for this parameter are: memcached | redis + Engine *string `json:"engine,omitempty"` + // The version number of the cache engine to be used for this cluster. To view + // the supported cache engine versions, use the DescribeCacheEngineVersions + // operation. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // but you cannot downgrade to an earlier engine version. If you want to use + // an earlier engine version, you must delete the existing cluster or replication + // group and create it anew with the earlier engine version. 
+ EngineVersion *string `json:"engineVersion,omitempty"` + // The network type you choose when modifying a cluster, either ipv4 | ipv6. + // IPv6 is supported for workloads using Redis engine version 6.2 onward or + // Memcached engine version 1.6.6 on all instances built on the Nitro system + // (http://aws.amazon.com/ec2/nitro/). + IPDiscovery *string `json:"ipDiscovery,omitempty"` + // Specifies the destination, format and type of the logs. + LogDeliveryConfigurations []*LogDeliveryConfigurationRequest `json:"logDeliveryConfigurations,omitempty"` + // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on + // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + NetworkType *string `json:"networkType,omitempty"` + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications are sent. + // + // The Amazon SNS topic owner must be the same as the cluster owner. + NotificationTopicARN *string `json:"notificationTopicARN,omitempty"` + NotificationTopicRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"notificationTopicRef,omitempty"` + // The initial number of cache nodes that the cluster has. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 40. + // + // If you need more than 40 nodes for your Memcached cluster, please fill out + // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + // Specifies whether the nodes in the cluster are created in a single outpost + // or across multiple outposts. + OutpostMode *string `json:"outpostMode,omitempty"` + // The port number on which each of the cache nodes accepts connections. 
+ Port *int64 `json:"port,omitempty"` + // The EC2 Availability Zone in which the cluster is created. + // + // All nodes belonging to this cluster are placed in the preferred Availability + // Zone. If you want to create your nodes across multiple Availability Zones, + // use PreferredAvailabilityZones. + // + // Default: System chosen Availability Zone. + PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` + // A list of the Availability Zones in which cache nodes are created. The order + // of the zones in the list is not important. + // + // This option is only supported on Memcached. + // + // If you are creating your cluster in an Amazon VPC (recommended) you can only + // locate nodes in Availability Zones that are associated with the subnets in + // the selected subnet group. + // + // The number of Availability Zones listed must equal the value of NumCacheNodes. + // + // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + // instead, or repeat the Availability Zone multiple times in the list. + // + // Default: System chosen Availability Zones. + PreferredAvailabilityZones []*string `json:"preferredAvailabilityZones,omitempty"` + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` + // The outpost ARN in which the cache cluster is created. + PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` + // The outpost ARNs in which the cache cluster is created. + PreferredOutpostARNs []*string `json:"preferredOutpostARNs,omitempty"` + // The ID of the replication group to which this cluster should belong. 
If this + // parameter is specified, the cluster is added to the specified replication + // group as a read replica; otherwise, the cluster is a standalone primary that + // is not part of any replication group. + // + // If the specified replication group is Multi-AZ enabled and the Availability + // Zone is not specified, the cluster is created in Availability Zones that + // provide the best spread of read replicas across Availability Zones. + // + // This parameter is only valid if the Engine parameter is redis. + ReplicationGroupID *string `json:"replicationGroupID,omitempty"` + ReplicationGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"replicationGroupRef,omitempty"` + // One or more VPC security groups associated with the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file is used to populate the node group (shard). The Amazon S3 object name + // in the ARN cannot contain any commas. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotARNs []*string `json:"snapshotARNs,omitempty"` + // The name of a Redis snapshot from which to restore data into the new node + // group (shard). The snapshot status changes to restoring while the new node + // group (shard) is being created. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `json:"snapshotName,omitempty"` + SnapshotRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"snapshotRef,omitempty"` + // The number of days for which ElastiCache retains automatic snapshots before + // deleting them. 
For example, if you set SnapshotRetentionLimit to 5, a snapshot + // taken today is retained for 5 days before being deleted. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + // The daily time range (in UTC) during which ElastiCache begins taking a daily + // snapshot of your node group (shard). + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, ElastiCache automatically chooses an + // appropriate time range. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `json:"snapshotWindow,omitempty"` + // A list of tags to be added to this resource. + Tags []*Tag `json:"tags,omitempty"` + // A flag that enables in-transit encryption when set to true. + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` +} + +// CacheClusterStatus defines the observed state of CacheCluster +type CacheClusterStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRS managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // A flag that enables encryption at-rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the cluster + // is created. 
To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + // to true when you create a cluster. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + // + // Default: false + // +kubebuilder:validation:Optional + AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` + // A flag that enables using an AuthToken (password) when issuing Redis commands. + // + // Default: false + // +kubebuilder:validation:Optional + AuthTokenEnabled *bool `json:"authTokenEnabled,omitempty"` + // The date the auth token was last modified + // +kubebuilder:validation:Optional + AuthTokenLastModifiedDate *metav1.Time `json:"authTokenLastModifiedDate,omitempty"` + // The date and time when the cluster was created. + // +kubebuilder:validation:Optional + CacheClusterCreateTime *metav1.Time `json:"cacheClusterCreateTime,omitempty"` + // The current state of this cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + // nodes, restore-failed, or snapshotting. + // +kubebuilder:validation:Optional + CacheClusterStatus *string `json:"cacheClusterStatus,omitempty"` + // A list of cache nodes that are members of the cluster. + // +kubebuilder:validation:Optional + CacheNodes []*CacheNode `json:"cacheNodes,omitempty"` + // Status of the cache parameter group. + // +kubebuilder:validation:Optional + CacheParameterGroup *CacheParameterGroupStatus_SDK `json:"cacheParameterGroup,omitempty"` + // A list of cache security group elements, composed of name and status sub-elements. + // +kubebuilder:validation:Optional + CacheSecurityGroups []*CacheSecurityGroupMembership `json:"cacheSecurityGroups,omitempty"` + // The URL of the web page where you can download the latest ElastiCache client + // library. 
+ // +kubebuilder:validation:Optional + ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` + // Represents a Memcached cluster endpoint which can be used by an application + // to connect to any node in the cluster. The configuration endpoint will always + // have .cfg in it. + // + // Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + // +kubebuilder:validation:Optional + ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + // +kubebuilder:validation:Optional + NotificationConfiguration *NotificationConfiguration `json:"notificationConfiguration,omitempty"` + // +kubebuilder:validation:Optional + PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` + // A boolean value indicating whether log delivery is enabled for the replication + // group. + // +kubebuilder:validation:Optional + ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` + // A list of VPC Security Groups associated with the cluster. + // +kubebuilder:validation:Optional + SecurityGroups []*SecurityGroupMembership `json:"securityGroups,omitempty"` + // A setting that allows you to migrate your clients to use in-transit encryption, + // with no downtime. 
+ // +kubebuilder:validation:Optional + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` +} + +// CacheCluster is the Schema for the CacheClusters API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="VERSION",type=string,priority=0,JSONPath=`.spec.engineVersion` +// +kubebuilder:printcolumn:name="STATUS",type=string,priority=0,JSONPath=`.status.cacheClusterStatus` +// +kubebuilder:printcolumn:name="ENDPOINT",type=string,priority=1,JSONPath=`.status.configurationEndpoint.address` +// +kubebuilder:printcolumn:name="Synced",type="string",priority=0,JSONPath=".status.conditions[?(@.type==\"ACK.ResourceSynced\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",priority=0,JSONPath=".metadata.creationTimestamp" +type CacheCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CacheClusterSpec `json:"spec,omitempty"` + Status CacheClusterStatus `json:"status,omitempty"` +} + +// CacheClusterList contains a list of CacheCluster +// +kubebuilder:object:root=true +type CacheClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CacheCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CacheCluster{}, &CacheClusterList{}) +} diff --git a/apis/v1alpha1/enums.go b/apis/v1alpha1/enums.go index d2b6b994..ca62567d 100644 --- a/apis/v1alpha1/enums.go +++ b/apis/v1alpha1/enums.go @@ -42,6 +42,7 @@ type AuthenticationType string const ( AuthenticationType_password AuthenticationType = "password" AuthenticationType_no_password AuthenticationType = "no-password" + AuthenticationType_iam AuthenticationType = "iam" ) type AutomaticFailoverStatus string @@ -60,6 +61,20 @@ const ( ChangeType_requires_reboot ChangeType = "requires-reboot" ) +type ClusterMode string + +const ( + ClusterMode_enabled ClusterMode = "enabled" + ClusterMode_disabled ClusterMode = "disabled" + 
ClusterMode_compatible ClusterMode = "compatible" +) + +type DataStorageUnit string + +const ( + DataStorageUnit_GB DataStorageUnit = "GB" +) + type DataTieringStatus string const ( @@ -74,6 +89,21 @@ const ( DestinationType_kinesis_firehose DestinationType = "kinesis-firehose" ) +type IPDiscovery string + +const ( + IPDiscovery_ipv4 IPDiscovery = "ipv4" + IPDiscovery_ipv6 IPDiscovery = "ipv6" +) + +type InputAuthenticationType string + +const ( + InputAuthenticationType_password InputAuthenticationType = "password" + InputAuthenticationType_no_password_required InputAuthenticationType = "no-password-required" + InputAuthenticationType_iam InputAuthenticationType = "iam" +) + type LogDeliveryConfigurationStatus string const ( @@ -105,6 +135,14 @@ const ( MultiAZStatus_disabled MultiAZStatus = "disabled" ) +type NetworkType string + +const ( + NetworkType_ipv4 NetworkType = "ipv4" + NetworkType_ipv6 NetworkType = "ipv6" + NetworkType_dual_stack NetworkType = "dual_stack" +) + type NodeUpdateInitiatedBy string const ( @@ -171,13 +209,22 @@ const ( type SourceType string const ( - SourceType_cache_cluster SourceType = "cache-cluster" - SourceType_cache_parameter_group SourceType = "cache-parameter-group" - SourceType_cache_security_group SourceType = "cache-security-group" - SourceType_cache_subnet_group SourceType = "cache-subnet-group" - SourceType_replication_group SourceType = "replication-group" - SourceType_user SourceType = "user" - SourceType_user_group SourceType = "user-group" + SourceType_cache_cluster SourceType = "cache-cluster" + SourceType_cache_parameter_group SourceType = "cache-parameter-group" + SourceType_cache_security_group SourceType = "cache-security-group" + SourceType_cache_subnet_group SourceType = "cache-subnet-group" + SourceType_replication_group SourceType = "replication-group" + SourceType_serverless_cache SourceType = "serverless-cache" + SourceType_serverless_cache_snapshot SourceType = "serverless-cache-snapshot" + SourceType_user 
SourceType = "user" + SourceType_user_group SourceType = "user-group" +) + +type TransitEncryptionMode string + +const ( + TransitEncryptionMode_preferred TransitEncryptionMode = "preferred" + TransitEncryptionMode_required TransitEncryptionMode = "required" ) type UpdateActionStatus string diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 7ac728ff..4183d7a4 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -1,4 +1,89 @@ resources: + CacheCluster: + fields: + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + is_immutable: true + ReplicationGroupID: + references: + resource: ReplicationGroup + path: Spec.ReplicationGroupID + is_immutable: true + SnapshotName: + references: + resource: Snapshot + path: Spec.SnapshotName + is_immutable: true + NotificationTopicARN: + references: + service_name: sns + resource: Topic + path: Status.ACKResourceMetadata.ARN + AuthToken: + is_secret: true + PreferredAvailabilityZone: + late_initialize: {} + PreferredAvailabilityZones: + compare: + is_ignored: true + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: VERSION + json_path: .spec.engineVersion + type: string + index: 10 + - name: STATUS + json_path: .status.cacheClusterStatus + type: string + index: 20 + - name: ENDPOINT + json_path: .status.configurationEndpoint.address + type: string + index: 30 + priority: 1 + exceptions: + errors: + 404: + code: CacheClusterNotFound + terminal_codes: + - ReplicationGroupNotFoundFault + - InvalidReplicationGroupStateFault + - CacheClusterAlreadyExistsFault + - InsufficientCacheClusterCapacityFault + - CacheSecurityGroupNotFoundFault + - CacheSubnetGroupNotFoundFault + - ClusterQuotaForCustomerExceededFault + - NodeQuotaForClusterExceededFault + - 
NodeQuotaForCustomerExceededFault + - CacheParameterGroupNotFoundFault + - InvalidVPCNetworkStateFault + - TagQuotaPerResource + - InvalidParameterValue + - InvalidParameterCombination + hooks: + sdk_create_post_set_output: + template_path: hooks/cache_cluster/sdk_create_post_set_output.go.tpl + sdk_delete_pre_build_request: + template_path: hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/cache_cluster/sdk_update_pre_build_request.go.tpl + sdk_update_post_build_request: + template_path: hooks/cache_cluster/sdk_update_post_build_request.go.tpl + sdk_update_post_set_output: + template_path: hooks/cache_cluster/sdk_update_post_set_output.go.tpl + delta_post_compare: + code: "modifyDelta(delta, a, b)" CacheSubnetGroup: exceptions: errors: @@ -237,10 +322,17 @@ operations: set_output_custom_method_name: CustomCreateUserGroupSetOutput DescribeUserGroups: set_output_custom_method_name: CustomDescribeUserGroupsSetOutput + CreateCacheCluster: + set_output_custom_method_name: customCreateCacheClusterSetOutput + ModifyCacheCluster: + set_output_custom_method_name: customModifyCacheClusterSetOutput + override_values: + ApplyImmediately: true ignore: resource_names: + - ServerlessCache + - ServerlessCacheSnapshot - GlobalReplicationGroup - - CacheCluster - CacheSecurityGroup field_paths: - DescribeSnapshotsInput.CacheClusterId @@ -255,3 +347,5 @@ ignore: - CreateReplicationGroupInput.GlobalReplicationGroupId - CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters + - CacheCluster.LogDeliveryConfigurations + - PendingModifiedValues.LogDeliveryConfigurations \ No newline at end of file diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index ea28aaa0..a3ef42e4 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -40,7 +40,7 @@ type 
AvailabilityZone struct { } // Contains all of the attributes of a specific cluster. -type CacheCluster struct { +type CacheCluster_SDK struct { ARN *string `json:"arn,omitempty"` AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` AuthTokenEnabled *bool `json:"authTokenEnabled,omitempty"` @@ -50,23 +50,37 @@ type CacheCluster struct { CacheClusterID *string `json:"cacheClusterID,omitempty"` CacheClusterStatus *string `json:"cacheClusterStatus,omitempty"` CacheNodeType *string `json:"cacheNodeType,omitempty"` - CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` - ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` + CacheNodes []*CacheNode `json:"cacheNodes,omitempty"` + // Status of the cache parameter group. + CacheParameterGroup *CacheParameterGroupStatus_SDK `json:"cacheParameterGroup,omitempty"` + CacheSecurityGroups []*CacheSecurityGroupMembership `json:"cacheSecurityGroups,omitempty"` + CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` // Represents the information required for client programs to connect to a cache // node. 
- ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` - Engine *string `json:"engine,omitempty"` - EngineVersion *string `json:"engineVersion,omitempty"` - LogDeliveryConfigurations []*LogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` - NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` - PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` - PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` - PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` - ReplicationGroupID *string `json:"replicationGroupID,omitempty"` - ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` - SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` - SnapshotWindow *string `json:"snapshotWindow,omitempty"` - TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` + Engine *string `json:"engine,omitempty"` + EngineVersion *string `json:"engineVersion,omitempty"` + IPDiscovery *string `json:"ipDiscovery,omitempty"` + NetworkType *string `json:"networkType,omitempty"` + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `json:"notificationConfiguration,omitempty"` + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + // A group of settings that are applied to the cluster in the future, or that + // are currently being applied. 
+ PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` + PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` + PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` + ReplicationGroupID *string `json:"replicationGroupID,omitempty"` + ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` + SecurityGroups []*SecurityGroupMembership `json:"securityGroups,omitempty"` + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + SnapshotWindow *string `json:"snapshotWindow,omitempty"` + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` } // Provides all of the details about a particular cache engine version. @@ -86,44 +100,45 @@ type CacheEngineVersion struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// -// - General purpose: Current generation: M6g node types: (available only -// for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, -// cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge -// For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, -// cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, -// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available -// only for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 -// node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: -// cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not -// recommended. Existing clusters are still supported but creation of new -// clusters is not supported for these types.) 
T1 node types: cache.t1.micro -// M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge -// M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// - General purpose: Current generation: M7g node types: cache.m7g.large, +// cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, +// cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// M6g node types (available only for Redis engine version 5.0.6 onward and +// for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, +// cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, +// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, +// cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: +// cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge +// T4g node types (available only for Redis engine version 5.0.6 onward and +// Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, +// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium +// T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous +// generation: (not recommended. Existing clusters are still supported but +// creation of new clusters is not supported for these types.) T1 node types: +// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, +// cache.m3.2xlarge // // - Compute optimized: Previous generation: (not recommended. Existing clusters // are still supported but creation of new clusters is not supported for // these types.) 
C1 node types: cache.c1.xlarge // -// - Memory optimized with data tiering: Current generation: R6gd node types -// (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, -// cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, -// cache.r6gd.16xlarge -// -// - Memory optimized: Current generation: R6g node types (available only -// for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, -// cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge -// For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, -// cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, -// cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge -// Previous generation: (not recommended. Existing clusters are still supported -// but creation of new clusters is not supported for these types.) 
M2 node -// types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: -// cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge +// - Memory optimized: Current generation: R7g node types: cache.r7g.large, +// cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, +// cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// R6g node types (available only for Redis engine version 5.0.6 onward and +// for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, +// cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, +// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, +// cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: +// cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, +// cache.r4.16xlarge Previous generation: (not recommended. Existing clusters +// are still supported but creation of new clusters is not supported for +// these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// cache.r3.8xlarge // // Additional node type info // @@ -180,8 +195,9 @@ type CacheNodeUpdateStatus struct { // Status of the cache parameter group. 
type CacheParameterGroupStatus_SDK struct { - CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` - ParameterApplyStatus *string `json:"parameterApplyStatus,omitempty"` + CacheNodeIDsToReboot []*string `json:"cacheNodeIDsToReboot,omitempty"` + CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + ParameterApplyStatus *string `json:"parameterApplyStatus,omitempty"` } // Represents the output of a CreateCacheParameterGroup operation. @@ -475,11 +491,13 @@ type PendingLogDeliveryConfiguration struct { // A group of settings that are applied to the cluster in the future, or that // are currently being applied. type PendingModifiedValues struct { - AuthTokenStatus *string `json:"authTokenStatus,omitempty"` - CacheNodeType *string `json:"cacheNodeType,omitempty"` - EngineVersion *string `json:"engineVersion,omitempty"` - LogDeliveryConfigurations []*PendingLogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` - NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + AuthTokenStatus *string `json:"authTokenStatus,omitempty"` + CacheNodeIDsToRemove []*string `json:"cacheNodeIDsToRemove,omitempty"` + CacheNodeType *string `json:"cacheNodeType,omitempty"` + EngineVersion *string `json:"engineVersion,omitempty"` + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` } // Update action that has been processed for the corresponding apply/stop request diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 1a271741..013abe4c 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -71,6 +71,419 @@ func (in *AvailabilityZone) DeepCopy() *AvailabilityZone { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheCluster. +func (in *CacheCluster) DeepCopy() *CacheCluster { + if in == nil { + return nil + } + out := new(CacheCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CacheCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheClusterList) DeepCopyInto(out *CacheClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CacheCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheClusterList. +func (in *CacheClusterList) DeepCopy() *CacheClusterList { + if in == nil { + return nil + } + out := new(CacheClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CacheClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CacheClusterSpec) DeepCopyInto(out *CacheClusterSpec) { + *out = *in + if in.AZMode != nil { + in, out := &in.AZMode, &out.AZMode + *out = new(string) + **out = **in + } + if in.AuthToken != nil { + in, out := &in.AuthToken, &out.AuthToken + *out = new(corev1alpha1.SecretKeyReference) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.CacheClusterID != nil { + in, out := &in.CacheClusterID, &out.CacheClusterID + *out = new(string) + **out = **in + } + if in.CacheNodeType != nil { + in, out := &in.CacheNodeType, &out.CacheNodeType + *out = new(string) + **out = **in + } + if in.CacheParameterGroupName != nil { + in, out := &in.CacheParameterGroupName, &out.CacheParameterGroupName + *out = new(string) + **out = **in + } + if in.CacheParameterGroupRef != nil { + in, out := &in.CacheParameterGroupRef, &out.CacheParameterGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.CacheSecurityGroupNames != nil { + in, out := &in.CacheSecurityGroupNames, &out.CacheSecurityGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheSubnetGroupName != nil { + in, out := &in.CacheSubnetGroupName, &out.CacheSubnetGroupName + *out = new(string) + **out = **in + } + if in.CacheSubnetGroupRef != nil { + in, out := &in.CacheSubnetGroupRef, &out.CacheSubnetGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in 
+ } + if in.LogDeliveryConfigurations != nil { + in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations + *out = make([]*LogDeliveryConfigurationRequest, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(LogDeliveryConfigurationRequest) + (*in).DeepCopyInto(*out) + } + } + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.NotificationTopicARN != nil { + in, out := &in.NotificationTopicARN, &out.NotificationTopicARN + *out = new(string) + **out = **in + } + if in.NotificationTopicRef != nil { + in, out := &in.NotificationTopicRef, &out.NotificationTopicRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.NumCacheNodes != nil { + in, out := &in.NumCacheNodes, &out.NumCacheNodes + *out = new(int64) + **out = **in + } + if in.OutpostMode != nil { + in, out := &in.OutpostMode, &out.OutpostMode + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int64) + **out = **in + } + if in.PreferredAvailabilityZone != nil { + in, out := &in.PreferredAvailabilityZone, &out.PreferredAvailabilityZone + *out = new(string) + **out = **in + } + if in.PreferredAvailabilityZones != nil { + in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.PreferredOutpostARN != nil { + in, out := &in.PreferredOutpostARN, &out.PreferredOutpostARN + *out = new(string) + **out = **in + } + if in.PreferredOutpostARNs != nil { + in, out := &in.PreferredOutpostARNs, &out.PreferredOutpostARNs + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReplicationGroupID != nil { + in, out := &in.ReplicationGroupID, &out.ReplicationGroupID + *out = new(string) + **out = **in + } + if in.ReplicationGroupRef != nil { + in, out := &in.ReplicationGroupRef, &out.ReplicationGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnapshotARNs != nil { + in, out := &in.SnapshotARNs, &out.SnapshotARNs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnapshotName != nil { + in, out := &in.SnapshotName, &out.SnapshotName + *out = new(string) + **out = **in + } + if in.SnapshotRef != nil { + in, out := &in.SnapshotRef, &out.SnapshotRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.SnapshotRetentionLimit != nil { + in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit + *out = new(int64) + **out = **in + } + if in.SnapshotWindow != nil { + in, out := &in.SnapshotWindow, &out.SnapshotWindow + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
CacheClusterSpec. +func (in *CacheClusterSpec) DeepCopy() *CacheClusterSpec { + if in == nil { + return nil + } + out := new(CacheClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheClusterStatus) DeepCopyInto(out *CacheClusterStatus) { + *out = *in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } + } + if in.AtRestEncryptionEnabled != nil { + in, out := &in.AtRestEncryptionEnabled, &out.AtRestEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.AuthTokenEnabled != nil { + in, out := &in.AuthTokenEnabled, &out.AuthTokenEnabled + *out = new(bool) + **out = **in + } + if in.AuthTokenLastModifiedDate != nil { + in, out := &in.AuthTokenLastModifiedDate, &out.AuthTokenLastModifiedDate + *out = (*in).DeepCopy() + } + if in.CacheClusterCreateTime != nil { + in, out := &in.CacheClusterCreateTime, &out.CacheClusterCreateTime + *out = (*in).DeepCopy() + } + if in.CacheClusterStatus != nil { + in, out := &in.CacheClusterStatus, &out.CacheClusterStatus + *out = new(string) + **out = **in + } + if in.CacheNodes != nil { + in, out := &in.CacheNodes, &out.CacheNodes + *out = make([]*CacheNode, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheNode) + (*in).DeepCopyInto(*out) + } + } + } + if in.CacheParameterGroup != nil { + in, out := &in.CacheParameterGroup, &out.CacheParameterGroup + *out = new(CacheParameterGroupStatus_SDK) + (*in).DeepCopyInto(*out) + } + if 
in.CacheSecurityGroups != nil { + in, out := &in.CacheSecurityGroups, &out.CacheSecurityGroups + *out = make([]*CacheSecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheSecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.ClientDownloadLandingPage != nil { + in, out := &in.ClientDownloadLandingPage, &out.ClientDownloadLandingPage + *out = new(string) + **out = **in + } + if in.ConfigurationEndpoint != nil { + in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(PendingModifiedValues) + (*in).DeepCopyInto(*out) + } + if in.ReplicationGroupLogDeliveryEnabled != nil { + in, out := &in.ReplicationGroupLogDeliveryEnabled, &out.ReplicationGroupLogDeliveryEnabled + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*SecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheClusterStatus. 
+func (in *CacheClusterStatus) DeepCopy() *CacheClusterStatus { + if in == nil { + return nil + } + out := new(CacheClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheCluster_SDK) DeepCopyInto(out *CacheCluster_SDK) { *out = *in if in.ARN != nil { in, out := &in.ARN, &out.ARN @@ -115,6 +528,33 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(string) **out = **in } + if in.CacheNodes != nil { + in, out := &in.CacheNodes, &out.CacheNodes + *out = make([]*CacheNode, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheNode) + (*in).DeepCopyInto(*out) + } + } + } + if in.CacheParameterGroup != nil { + in, out := &in.CacheParameterGroup, &out.CacheParameterGroup + *out = new(CacheParameterGroupStatus_SDK) + (*in).DeepCopyInto(*out) + } + if in.CacheSecurityGroups != nil { + in, out := &in.CacheSecurityGroups, &out.CacheSecurityGroups + *out = make([]*CacheSecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheSecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } if in.CacheSubnetGroupName != nil { in, out := &in.CacheSubnetGroupName, &out.CacheSubnetGroupName *out = new(string) @@ -140,22 +580,31 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(string) **out = **in } - if in.LogDeliveryConfigurations != nil { - in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations - *out = make([]*LogDeliveryConfiguration, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(LogDeliveryConfiguration) - (*in).DeepCopyInto(*out) - } - } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := 
&in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfiguration) + (*in).DeepCopyInto(*out) } if in.NumCacheNodes != nil { in, out := &in.NumCacheNodes, &out.NumCacheNodes *out = new(int64) **out = **in } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(PendingModifiedValues) + (*in).DeepCopyInto(*out) + } if in.PreferredAvailabilityZone != nil { in, out := &in.PreferredAvailabilityZone, &out.PreferredAvailabilityZone *out = new(string) @@ -181,6 +630,17 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(bool) **out = **in } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*SecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } if in.SnapshotRetentionLimit != nil { in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit *out = new(int64) @@ -196,14 +656,19 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(bool) **out = **in } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheCluster. -func (in *CacheCluster) DeepCopy() *CacheCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheCluster_SDK. 
+func (in *CacheCluster_SDK) DeepCopy() *CacheCluster_SDK { if in == nil { return nil } - out := new(CacheCluster) + out := new(CacheCluster_SDK) in.DeepCopyInto(out) return out } @@ -1877,6 +2342,17 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(string) **out = **in } + if in.CacheNodeIDsToRemove != nil { + in, out := &in.CacheNodeIDsToRemove, &out.CacheNodeIDsToRemove + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.CacheNodeType != nil { in, out := &in.CacheNodeType, &out.CacheNodeType *out = new(string) @@ -1887,22 +2363,21 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(string) **out = **in } - if in.LogDeliveryConfigurations != nil { - in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations - *out = make([]*PendingLogDeliveryConfiguration, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PendingLogDeliveryConfiguration) - (*in).DeepCopyInto(*out) - } - } - } if in.NumCacheNodes != nil { in, out := &in.NumCacheNodes, &out.NumCacheNodes *out = new(int64) **out = **in } + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PendingModifiedValues. 
diff --git a/cmd/controller/main.go b/cmd/controller/main.go index e617b5e8..efd665cc 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -25,6 +25,7 @@ import ( acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackrtutil "github.com/aws-controllers-k8s/runtime/pkg/util" ackrtwebhook "github.com/aws-controllers-k8s/runtime/pkg/webhook" + snsapitypes "github.com/aws-controllers-k8s/sns-controller/apis/v1alpha1" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -40,6 +41,7 @@ import ( svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_cluster" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_parameter_group" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_subnet_group" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/replication_group" @@ -64,6 +66,7 @@ func init() { _ = svctypes.AddToScheme(scheme) _ = ackv1alpha1.AddToScheme(scheme) _ = ec2apitypes.AddToScheme(scheme) + _ = snsapitypes.AddToScheme(scheme) } func main() { diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml new file mode 100644 index 00000000..c51ef39e --- /dev/null +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml @@ -0,0 +1,871 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cacheclusters.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: CacheCluster + listKind: CacheClusterList + plural: cacheclusters + singular: cachecluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: 
.spec.engineVersion + name: VERSION + type: string + - jsonPath: .status.cacheClusterStatus + name: STATUS + type: string + - jsonPath: .status.configurationEndpoint.address + name: ENDPOINT + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CacheCluster is the Schema for the CacheClusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CacheClusterSpec defines the desired state of CacheCluster. + + + Contains all of the attributes of a specific cluster. + properties: + authToken: + description: |- + Reserved parameter. The password used to access a password protected server. + + + Password constraints: + + + * Must be only printable ASCII characters. + + + * Must be at least 16 characters and no more than 128 characters in length. + + + * The only permitted printable special characters are !, &, #, $, ^, <, + >, and -. Other printable special characters cannot be used in the AUTH + token. + + + For more information, see AUTH password (http://redis.io/commands/AUTH) at + http://redis.io/commands/AUTH. 
+ properties: + key: + description: Key is the key within the secret + type: string + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + required: + - key + type: object + x-kubernetes-map-type: atomic + autoMinorVersionUpgrade: + description: |- + If you are running Redis engine version 6.0 or later, set this parameter + to yes if you want to opt-in to the next auto minor version upgrade campaign. + This parameter is disabled for previous versions. + type: boolean + azMode: + description: |- + Specifies whether the nodes in this Memcached cluster are created in a single + Availability Zone or created across multiple Availability Zones in the cluster's + region. + + + This parameter is only supported for Memcached clusters. + + + If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + assumes single-az mode. + type: string + cacheClusterID: + description: |- + The node group (shard) identifier. This parameter is stored as a lowercase + string. + + + Constraints: + + + * A name must contain from 1 to 50 alphanumeric characters or hyphens. + + + * The first character must be a letter. + + + * A name cannot end with a hyphen or contain two consecutive hyphens. + type: string + cacheNodeType: + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. 
+ + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and + Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) 
C1 node types: cache.c1.xlarge + + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + + Additional node type info + + + * All current generation instance types are created in Amazon VPC by default. + + + * Redis append-only files (AOF) are not supported for T1 or T2 instances. + + + * Redis Multi-AZ with automatic failover is not supported on T1 instances. + + + * Redis configuration variables appendonly and appendfsync are not supported + on Redis version 2.8.22 and later. + type: string + cacheParameterGroupName: + description: |- + The name of the parameter group to associate with this cluster. If this argument + is omitted, the default parameter group for the specified engine is used. + You cannot use any parameter group which has cluster-enabled='yes' when creating + a cluster. 
+ type: string + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + cacheSecurityGroupNames: + description: |- + A list of security group names to associate with this cluster. + + + Use this parameter only when you are creating a cluster outside of an Amazon + Virtual Private Cloud (Amazon VPC). + items: + type: string + type: array + cacheSubnetGroupName: + description: |- + The name of the subnet group to be used for the cluster. + + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + engine: + description: |- + The name of the cache engine to be used for this cluster. 
+ + + Valid values for this parameter are: memcached | redis + type: string + engineVersion: + description: |- + The version number of the cache engine to be used for this cluster. To view + the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + but you cannot downgrade to an earlier engine version. If you want to use + an earlier engine version, you must delete the existing cluster or replication + group and create it anew with the earlier engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when modifying a cluster, either ipv4 | ipv6. + IPv6 is supported for workloads using Redis engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + logDeliveryConfigurations: + description: Specifies the destination, format and type of the logs. + items: + description: Specifies the destination, format and type of the logs. + properties: + destinationDetails: + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. + properties: + cloudWatchLogsDetails: + description: The configuration details of the CloudWatch + Logs destination. + properties: + logGroup: + type: string + type: object + kinesisFirehoseDetails: + description: The configuration details of the Kinesis Data + Firehose destination. + properties: + deliveryStream: + type: string + type: object + type: object + destinationType: + type: string + enabled: + type: boolean + logFormat: + type: string + logType: + type: string + type: object + type: array + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads + using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on + all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string + notificationTopicARN: + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + + The Amazon SNS topic owner must be the same as the cluster owner. + type: string + notificationTopicRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + numCacheNodes: + description: |- + The initial number of cache nodes that the cluster has. + + + For clusters running Redis, this value must be 1. For clusters running Memcached, + this value must be between 1 and 40. + + + If you need more than 40 nodes for your Memcached cluster, please fill out + the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + format: int64 + type: integer + outpostMode: + description: |- + Specifies whether the nodes in the cluster are created in a single outpost + or across multiple outposts. + type: string + port: + description: The port number on which each of the cache nodes accepts + connections. + format: int64 + type: integer + preferredAvailabilityZone: + description: |- + The EC2 Availability Zone in which the cluster is created. + + + All nodes belonging to this cluster are placed in the preferred Availability + Zone. 
If you want to create your nodes across multiple Availability Zones, + use PreferredAvailabilityZones. + + + Default: System chosen Availability Zone. + type: string + preferredAvailabilityZones: + description: |- + A list of the Availability Zones in which cache nodes are created. The order + of the zones in the list is not important. + + + This option is only supported on Memcached. + + + If you are creating your cluster in an Amazon VPC (recommended) you can only + locate nodes in Availability Zones that are associated with the subnets in + the selected subnet group. + + + The number of Availability Zones listed must equal the value of NumCacheNodes. + + + If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + instead, or repeat the Availability Zone multiple times in the list. + + + Default: System chosen Availability Zones. + items: + type: string + type: array + preferredMaintenanceWindow: + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + type: string + preferredOutpostARN: + description: The outpost ARN in which the cache cluster is created. + type: string + preferredOutpostARNs: + description: The outpost ARNs in which the cache cluster is created. + items: + type: string + type: array + replicationGroupID: + description: |- + The ID of the replication group to which this cluster should belong. If this + parameter is specified, the cluster is added to the specified replication + group as a read replica; otherwise, the cluster is a standalone primary that + is not part of any replication group. + + + If the specified replication group is Multi-AZ enabled and the Availability + Zone is not specified, the cluster is created in Availability Zones that + provide the best spread of read replicas across Availability Zones. 
+ + + This parameter is only valid if the Engine parameter is redis. + type: string + replicationGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + securityGroupIDs: + description: |- + One or more VPC security groups associated with the cluster. + + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + items: + type: string + type: array + snapshotARNs: + description: |- + A single-element string list containing an Amazon Resource Name (ARN) that + uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + file is used to populate the node group (shard). The Amazon S3 object name + in the ARN cannot contain any commas. + + + This parameter is only valid if the Engine parameter is redis. + + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + items: + type: string + type: array + snapshotName: + description: |- + The name of a Redis snapshot from which to restore data into the new node + group (shard). The snapshot status changes to restoring while the new node + group (shard) is being created. + + + This parameter is only valid if the Engine parameter is redis. 
+ type: string + snapshotRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + snapshotRetentionLimit: + description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + taken today is retained for 5 days before being deleted. + + + This parameter is only valid if the Engine parameter is redis. + + + Default: 0 (i.e., automatic backups are disabled for this cache cluster). + format: int64 + type: integer + snapshotWindow: + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). + + + Example: 05:00-09:00 + + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. + + + This parameter is only valid if the Engine parameter is redis. + type: string + tags: + description: A list of tags to be added to this resource. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. 
+ properties: + key: + type: string + value: + type: string + type: object + type: array + transitEncryptionEnabled: + description: A flag that enables in-transit encryption when set to + true. + type: boolean + required: + - cacheClusterID + type: object + status: + description: CacheClusterStatus defines the observed state of CacheCluster + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + atRestEncryptionEnabled: + description: |- + A flag that enables encryption at-rest when set to true. + + + You cannot modify the value of AtRestEncryptionEnabled after the cluster + is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + to true when you create a cluster. + + + Required: Only available when creating a replication group in an Amazon VPC + using redis version 3.2.6, 4.x or later. 
+ + + Default: false + type: boolean + authTokenEnabled: + description: |- + A flag that enables using an AuthToken (password) when issuing Redis commands. + + + Default: false + type: boolean + authTokenLastModifiedDate: + description: The date the auth token was last modified + format: date-time + type: string + cacheClusterCreateTime: + description: The date and time when the cluster was created. + format: date-time + type: string + cacheClusterStatus: + description: |- + The current state of this cluster, one of the following values: available, + creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + nodes, restore-failed, or snapshotting. + type: string + cacheNodes: + description: A list of cache nodes that are members of the cluster. + items: + description: |- + Represents an individual cache node within a cluster. Each cache node runs + its own instance of the cluster's protocol-compliant caching software - either + Memcached or Redis. + + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. 
+ + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and + Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) 
C1 node types: cache.c1.xlarge + + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + + Additional node type info + + + * All current generation instance types are created in Amazon VPC by default. + + + * Redis append-only files (AOF) are not supported for T1 or T2 instances. + + + * Redis Multi-AZ with automatic failover is not supported on T1 instances. + + + * Redis configuration variables appendonly and appendfsync are not supported + on Redis version 2.8.22 and later. + properties: + cacheNodeCreateTime: + format: date-time + type: string + cacheNodeID: + type: string + cacheNodeStatus: + type: string + customerAvailabilityZone: + type: string + customerOutpostARN: + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. 
+ properties: + address: + type: string + port: + format: int64 + type: integer + type: object + parameterGroupStatus: + type: string + sourceCacheNodeID: + type: string + type: object + type: array + cacheParameterGroup: + description: Status of the cache parameter group. + properties: + cacheNodeIDsToReboot: + items: + type: string + type: array + cacheParameterGroupName: + type: string + parameterApplyStatus: + type: string + type: object + cacheSecurityGroups: + description: A list of cache security group elements, composed of + name and status sub-elements. + items: + description: Represents a cluster's status within a particular cache + security group. + properties: + cacheSecurityGroupName: + type: string + status: + type: string + type: object + type: array + clientDownloadLandingPage: + description: |- + The URL of the web page where you can download the latest ElastiCache client + library. + type: string + conditions: + description: |- + All CRS managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + configurationEndpoint: + description: |- + Represents a Memcached cluster endpoint which can be used by an application + to connect to any node in the cluster. The configuration endpoint will always + have .cfg in it. + + + Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + notificationConfiguration: + description: |- + Describes a notification topic and its status. Notification topics are used + for publishing ElastiCache events to subscribers using Amazon Simple Notification + Service (SNS). + properties: + topicARN: + type: string + topicStatus: + type: string + type: object + pendingModifiedValues: + description: |- + A group of settings that are applied to the cluster in the future, or that + are currently being applied. + properties: + authTokenStatus: + type: string + cacheNodeIDsToRemove: + items: + type: string + type: array + cacheNodeType: + type: string + engineVersion: + type: string + numCacheNodes: + format: int64 + type: integer + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string + type: object + replicationGroupLogDeliveryEnabled: + description: |- + A boolean value indicating whether log delivery is enabled for the replication + group. + type: boolean + securityGroups: + description: A list of VPC Security Groups associated with the cluster. + items: + description: Represents a single cache security group and its status. + properties: + securityGroupID: + type: string + status: + type: string + type: object + type: array + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 49468741..898b2121 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - common + - bases/elasticache.services.k8s.aws_cacheclusters.yaml - bases/elasticache.services.k8s.aws_cacheparametergroups.yaml - bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml - bases/elasticache.services.k8s.aws_replicationgroups.yaml diff --git a/config/rbac/cluster-role-controller.yaml b/config/rbac/cluster-role-controller.yaml index e8ef1c3c..bb1c76a1 100644 --- a/config/rbac/cluster-role-controller.yaml +++ b/config/rbac/cluster-role-controller.yaml @@ -58,6 +58,26 @@ rules: verbs: - get - list +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters/status + verbs: + - get + - patch + - update - apiGroups: - elasticache.services.k8s.aws resources: @@ -218,3 +238,17 @@ rules: - get - patch - update +- apiGroups: + - sns.services.k8s.aws + resources: + - topics + verbs: + - get + - list +- apiGroups: + - sns.services.k8s.aws + resources: + - topics/status + verbs: + - get + - list diff --git a/config/rbac/role-reader.yaml b/config/rbac/role-reader.yaml index 397046b6..797e8dc8 100644 --- a/config/rbac/role-reader.yaml +++ b/config/rbac/role-reader.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups diff --git a/config/rbac/role-writer.yaml b/config/rbac/role-writer.yaml index 2afde512..92c27962 100644 --- a/config/rbac/role-writer.yaml +++ b/config/rbac/role-writer.yaml @@ -9,6 +9,7 @@ rules: - 
apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups @@ -26,6 +27,7 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups diff --git a/generator.yaml b/generator.yaml index 7ac728ff..4183d7a4 100644 --- a/generator.yaml +++ b/generator.yaml @@ -1,4 +1,89 @@ resources: + CacheCluster: + fields: + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + is_immutable: true + ReplicationGroupID: + references: + resource: ReplicationGroup + path: Spec.ReplicationGroupID + is_immutable: true + SnapshotName: + references: + resource: Snapshot + path: Spec.SnapshotName + is_immutable: true + NotificationTopicARN: + references: + service_name: sns + resource: Topic + path: Status.ACKResourceMetadata.ARN + AuthToken: + is_secret: true + PreferredAvailabilityZone: + late_initialize: {} + PreferredAvailabilityZones: + compare: + is_ignored: true + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: VERSION + json_path: .spec.engineVersion + type: string + index: 10 + - name: STATUS + json_path: .status.cacheClusterStatus + type: string + index: 20 + - name: ENDPOINT + json_path: .status.configurationEndpoint.address + type: string + index: 30 + priority: 1 + exceptions: + errors: + 404: + code: CacheClusterNotFound + terminal_codes: + - ReplicationGroupNotFoundFault + - InvalidReplicationGroupStateFault + - CacheClusterAlreadyExistsFault + - InsufficientCacheClusterCapacityFault + - CacheSecurityGroupNotFoundFault + - CacheSubnetGroupNotFoundFault + - ClusterQuotaForCustomerExceededFault + - NodeQuotaForClusterExceededFault + - NodeQuotaForCustomerExceededFault + - CacheParameterGroupNotFoundFault + - 
InvalidVPCNetworkStateFault + - TagQuotaPerResource + - InvalidParameterValue + - InvalidParameterCombination + hooks: + sdk_create_post_set_output: + template_path: hooks/cache_cluster/sdk_create_post_set_output.go.tpl + sdk_delete_pre_build_request: + template_path: hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/cache_cluster/sdk_update_pre_build_request.go.tpl + sdk_update_post_build_request: + template_path: hooks/cache_cluster/sdk_update_post_build_request.go.tpl + sdk_update_post_set_output: + template_path: hooks/cache_cluster/sdk_update_post_set_output.go.tpl + delta_post_compare: + code: "modifyDelta(delta, a, b)" CacheSubnetGroup: exceptions: errors: @@ -237,10 +322,17 @@ operations: set_output_custom_method_name: CustomCreateUserGroupSetOutput DescribeUserGroups: set_output_custom_method_name: CustomDescribeUserGroupsSetOutput + CreateCacheCluster: + set_output_custom_method_name: customCreateCacheClusterSetOutput + ModifyCacheCluster: + set_output_custom_method_name: customModifyCacheClusterSetOutput + override_values: + ApplyImmediately: true ignore: resource_names: + - ServerlessCache + - ServerlessCacheSnapshot - GlobalReplicationGroup - - CacheCluster - CacheSecurityGroup field_paths: - DescribeSnapshotsInput.CacheClusterId @@ -255,3 +347,5 @@ ignore: - CreateReplicationGroupInput.GlobalReplicationGroupId - CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters + - CacheCluster.LogDeliveryConfigurations + - PendingModifiedValues.LogDeliveryConfigurations \ No newline at end of file diff --git a/go.mod b/go.mod index 0f033d3d..75c4bdcb 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ toolchain go1.21.5 require ( github.com/aws-controllers-k8s/ec2-controller v1.0.7 github.com/aws-controllers-k8s/runtime v0.34.0 + 
github.com/aws-controllers-k8s/sns-controller v1.0.11 github.com/aws/aws-sdk-go v1.49.0 github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.4.1 diff --git a/go.sum b/go.sum index d9e1fca4..c02376b5 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/aws-controllers-k8s/ec2-controller v1.0.7 h1:7MDu2bq8NFKbgzzgHYPFRT7b github.com/aws-controllers-k8s/ec2-controller v1.0.7/go.mod h1:PvsQehgncHgcu9FiY13M45+GkVsKI98g7G83SrgH7vY= github.com/aws-controllers-k8s/runtime v0.34.0 h1:pz8MTzz8bY9JMTSMjvWx9SAJ6bJQIEx5ZrXw6wS74mc= github.com/aws-controllers-k8s/runtime v0.34.0/go.mod h1:aCud9ahYydZ22JhBStUOW2hnzyE1lWPhGAfxW5AW1YU= +github.com/aws-controllers-k8s/sns-controller v1.0.11 h1:nnkywTHzO64y7RrrfoPNyYf1TOkkQHtlg+S0jEPKUZ8= +github.com/aws-controllers-k8s/sns-controller v1.0.11/go.mod h1:ODQIDZR3hHQqcyif4UXVFQfEzTaWU1jqFtVr83K2p9M= github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml new file mode 100644 index 00000000..bc042534 --- /dev/null +++ b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml @@ -0,0 +1,871 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cacheclusters.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: CacheCluster + listKind: CacheClusterList + plural: cacheclusters + singular: cachecluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.engineVersion + name: VERSION + type: string + - jsonPath: .status.cacheClusterStatus + name: STATUS + type: string + - jsonPath: .status.configurationEndpoint.address + name: ENDPOINT + 
priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CacheCluster is the Schema for the CacheClusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CacheClusterSpec defines the desired state of CacheCluster. + + + Contains all of the attributes of a specific cluster. + properties: + authToken: + description: |- + Reserved parameter. The password used to access a password protected server. + + + Password constraints: + + + - Must be only printable ASCII characters. + + + - Must be at least 16 characters and no more than 128 characters in length. + + + - The only permitted printable special characters are !, &, #, $, ^, <, + >, and -. Other printable special characters cannot be used in the AUTH + token. + + + For more information, see AUTH password (http://redis.io/commands/AUTH) at + http://redis.io/commands/AUTH. + properties: + key: + description: Key is the key within the secret + type: string + name: + description: name is unique within a namespace to reference a + secret resource. 
+ type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + required: + - key + type: object + x-kubernetes-map-type: atomic + autoMinorVersionUpgrade: + description: |- + If you are running Redis engine version 6.0 or later, set this parameter + to yes if you want to opt-in to the next auto minor version upgrade campaign. + This parameter is disabled for previous versions. + type: boolean + azMode: + description: |- + Specifies whether the nodes in this Memcached cluster are created in a single + Availability Zone or created across multiple Availability Zones in the cluster's + region. + + + This parameter is only supported for Memcached clusters. + + + If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + assumes single-az mode. + type: string + cacheClusterID: + description: |- + The node group (shard) identifier. This parameter is stored as a lowercase + string. + + + Constraints: + + + - A name must contain from 1 to 50 alphanumeric characters or hyphens. + + + - The first character must be a letter. + + + - A name cannot end with a hyphen or contain two consecutive hyphens. + type: string + cacheNodeType: + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. 
+ + + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and + Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + + - Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) 
C1 node types: cache.c1.xlarge + + + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + + Additional node type info + + + - All current generation instance types are created in Amazon VPC by default. + + + - Redis append-only files (AOF) are not supported for T1 or T2 instances. + + + - Redis Multi-AZ with automatic failover is not supported on T1 instances. + + + - Redis configuration variables appendonly and appendfsync are not supported + on Redis version 2.8.22 and later. + type: string + cacheParameterGroupName: + description: |- + The name of the parameter group to associate with this cluster. If this argument + is omitted, the default parameter group for the specified engine is used. + You cannot use any parameter group which has cluster-enabled='yes' when creating + a cluster. 
+ type: string + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + cacheSecurityGroupNames: + description: |- + A list of security group names to associate with this cluster. + + + Use this parameter only when you are creating a cluster outside of an Amazon + Virtual Private Cloud (Amazon VPC). + items: + type: string + type: array + cacheSubnetGroupName: + description: |- + The name of the subnet group to be used for the cluster. + + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + engine: + description: |- + The name of the cache engine to be used for this cluster. 
+ + + Valid values for this parameter are: memcached | redis + type: string + engineVersion: + description: |- + The version number of the cache engine to be used for this cluster. To view + the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + but you cannot downgrade to an earlier engine version. If you want to use + an earlier engine version, you must delete the existing cluster or replication + group and create it anew with the earlier engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when modifying a cluster, either ipv4 | ipv6. + IPv6 is supported for workloads using Redis engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + logDeliveryConfigurations: + description: Specifies the destination, format and type of the logs. + items: + description: Specifies the destination, format and type of the logs. + properties: + destinationDetails: + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. + properties: + cloudWatchLogsDetails: + description: The configuration details of the CloudWatch + Logs destination. + properties: + logGroup: + type: string + type: object + kinesisFirehoseDetails: + description: The configuration details of the Kinesis Data + Firehose destination. + properties: + deliveryStream: + type: string + type: object + type: object + destinationType: + type: string + enabled: + type: boolean + logFormat: + type: string + logType: + type: string + type: object + type: array + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads + using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on + all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string + notificationTopicARN: + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + + The Amazon SNS topic owner must be the same as the cluster owner. + type: string + notificationTopicRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + numCacheNodes: + description: |- + The initial number of cache nodes that the cluster has. + + + For clusters running Redis, this value must be 1. For clusters running Memcached, + this value must be between 1 and 40. + + + If you need more than 40 nodes for your Memcached cluster, please fill out + the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + format: int64 + type: integer + outpostMode: + description: |- + Specifies whether the nodes in the cluster are created in a single outpost + or across multiple outposts. + type: string + port: + description: The port number on which each of the cache nodes accepts + connections. + format: int64 + type: integer + preferredAvailabilityZone: + description: |- + The EC2 Availability Zone in which the cluster is created. + + + All nodes belonging to this cluster are placed in the preferred Availability + Zone. 
If you want to create your nodes across multiple Availability Zones, + use PreferredAvailabilityZones. + + + Default: System chosen Availability Zone. + type: string + preferredAvailabilityZones: + description: |- + A list of the Availability Zones in which cache nodes are created. The order + of the zones in the list is not important. + + + This option is only supported on Memcached. + + + If you are creating your cluster in an Amazon VPC (recommended) you can only + locate nodes in Availability Zones that are associated with the subnets in + the selected subnet group. + + + The number of Availability Zones listed must equal the value of NumCacheNodes. + + + If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + instead, or repeat the Availability Zone multiple times in the list. + + + Default: System chosen Availability Zones. + items: + type: string + type: array + preferredMaintenanceWindow: + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + type: string + preferredOutpostARN: + description: The outpost ARN in which the cache cluster is created. + type: string + preferredOutpostARNs: + description: The outpost ARNs in which the cache cluster is created. + items: + type: string + type: array + replicationGroupID: + description: |- + The ID of the replication group to which this cluster should belong. If this + parameter is specified, the cluster is added to the specified replication + group as a read replica; otherwise, the cluster is a standalone primary that + is not part of any replication group. + + + If the specified replication group is Multi-AZ enabled and the Availability + Zone is not specified, the cluster is created in Availability Zones that + provide the best spread of read replicas across Availability Zones. 
+ + + This parameter is only valid if the Engine parameter is redis. + type: string + replicationGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + securityGroupIDs: + description: |- + One or more VPC security groups associated with the cluster. + + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + items: + type: string + type: array + snapshotARNs: + description: |- + A single-element string list containing an Amazon Resource Name (ARN) that + uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + file is used to populate the node group (shard). The Amazon S3 object name + in the ARN cannot contain any commas. + + + This parameter is only valid if the Engine parameter is redis. + + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + items: + type: string + type: array + snapshotName: + description: |- + The name of a Redis snapshot from which to restore data into the new node + group (shard). The snapshot status changes to restoring while the new node + group (shard) is being created. + + + This parameter is only valid if the Engine parameter is redis. 
+ type: string + snapshotRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + type: object + type: object + snapshotRetentionLimit: + description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + taken today is retained for 5 days before being deleted. + + + This parameter is only valid if the Engine parameter is redis. + + + Default: 0 (i.e., automatic backups are disabled for this cache cluster). + format: int64 + type: integer + snapshotWindow: + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). + + + Example: 05:00-09:00 + + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. + + + This parameter is only valid if the Engine parameter is redis. + type: string + tags: + description: A list of tags to be added to this resource. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. 
+ properties: + key: + type: string + value: + type: string + type: object + type: array + transitEncryptionEnabled: + description: A flag that enables in-transit encryption when set to + true. + type: boolean + required: + - cacheClusterID + type: object + status: + description: CacheClusterStatus defines the observed state of CacheCluster + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + atRestEncryptionEnabled: + description: |- + A flag that enables encryption at-rest when set to true. + + + You cannot modify the value of AtRestEncryptionEnabled after the cluster + is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + to true when you create a cluster. + + + Required: Only available when creating a replication group in an Amazon VPC + using redis version 3.2.6, 4.x or later. 
+ + + Default: false + type: boolean + authTokenEnabled: + description: |- + A flag that enables using an AuthToken (password) when issuing Redis commands. + + + Default: false + type: boolean + authTokenLastModifiedDate: + description: The date the auth token was last modified + format: date-time + type: string + cacheClusterCreateTime: + description: The date and time when the cluster was created. + format: date-time + type: string + cacheClusterStatus: + description: |- + The current state of this cluster, one of the following values: available, + creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + nodes, restore-failed, or snapshotting. + type: string + cacheNodes: + description: A list of cache nodes that are members of the cluster. + items: + description: |- + Represents an individual cache node within a cluster. Each cache node runs + its own instance of the cluster's protocol-compliant caching software - either + Memcached or Redis. + + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. 
+ + + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis engine version 5.0.6 onward and + Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + + - Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) 
C1 node types: cache.c1.xlarge + + + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis engine version 5.0.6 onward and + for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + + Additional node type info + + + - All current generation instance types are created in Amazon VPC by default. + + + - Redis append-only files (AOF) are not supported for T1 or T2 instances. + + + - Redis Multi-AZ with automatic failover is not supported on T1 instances. + + + - Redis configuration variables appendonly and appendfsync are not supported + on Redis version 2.8.22 and later. + properties: + cacheNodeCreateTime: + format: date-time + type: string + cacheNodeID: + type: string + cacheNodeStatus: + type: string + customerAvailabilityZone: + type: string + customerOutpostARN: + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. 
+ properties: + address: + type: string + port: + format: int64 + type: integer + type: object + parameterGroupStatus: + type: string + sourceCacheNodeID: + type: string + type: object + type: array + cacheParameterGroup: + description: Status of the cache parameter group. + properties: + cacheNodeIDsToReboot: + items: + type: string + type: array + cacheParameterGroupName: + type: string + parameterApplyStatus: + type: string + type: object + cacheSecurityGroups: + description: A list of cache security group elements, composed of + name and status sub-elements. + items: + description: Represents a cluster's status within a particular cache + security group. + properties: + cacheSecurityGroupName: + type: string + status: + type: string + type: object + type: array + clientDownloadLandingPage: + description: |- + The URL of the web page where you can download the latest ElastiCache client + library. + type: string + conditions: + description: |- + All CRS managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + configurationEndpoint: + description: |- + Represents a Memcached cluster endpoint which can be used by an application + to connect to any node in the cluster. The configuration endpoint will always + have .cfg in it. + + + Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + notificationConfiguration: + description: |- + Describes a notification topic and its status. Notification topics are used + for publishing ElastiCache events to subscribers using Amazon Simple Notification + Service (SNS). + properties: + topicARN: + type: string + topicStatus: + type: string + type: object + pendingModifiedValues: + description: |- + A group of settings that are applied to the cluster in the future, or that + are currently being applied. + properties: + authTokenStatus: + type: string + cacheNodeIDsToRemove: + items: + type: string + type: array + cacheNodeType: + type: string + engineVersion: + type: string + numCacheNodes: + format: int64 + type: integer + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string + type: object + replicationGroupLogDeliveryEnabled: + description: |- + A boolean value indicating whether log delivery is enabled for the replication + group. + type: boolean + securityGroups: + description: A list of VPC Security Groups associated with the cluster. + items: + description: Represents a single cache security group and its status. + properties: + securityGroupID: + type: string + status: + type: string + type: object + type: array + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl index 348d5dbc..cb8aba9a 100644 --- a/helm/templates/_helpers.tpl +++ b/helm/templates/_helpers.tpl @@ -105,6 +105,26 @@ rules: verbs: - get - list +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters/status + verbs: + - get + - patch + - update - apiGroups: - elasticache.services.k8s.aws resources: @@ -265,4 +285,18 @@ rules: - get - patch - update +- apiGroups: + - sns.services.k8s.aws + resources: + - topics + verbs: + - get + - list +- apiGroups: + - sns.services.k8s.aws + resources: + - topics/status + verbs: + - get + - list {{- end }} \ No newline at end of file diff --git a/helm/templates/role-reader.yaml b/helm/templates/role-reader.yaml index aa520863..cb2bf63c 100644 --- a/helm/templates/role-reader.yaml +++ b/helm/templates/role-reader.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups diff --git a/helm/templates/role-writer.yaml b/helm/templates/role-writer.yaml index bc45daa2..f4b7918e 100644 --- a/helm/templates/role-writer.yaml +++ b/helm/templates/role-writer.yaml @@ -9,6 +9,7 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups @@ -26,6 +27,7 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups diff --git a/pkg/resource/cache_cluster/custom_set_output.go b/pkg/resource/cache_cluster/custom_set_output.go new file mode 100644 index 00000000..61e511c5 --- /dev/null +++ 
b/pkg/resource/cache_cluster/custom_set_output.go
@@ -0,0 +1,93 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//     http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cache_cluster
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/aws/aws-sdk-go/service/elasticache"
+
+	svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
+)
+
+// customCreateCacheClusterSetOutput records the spec fields tracked via
+// annotations (the last-requested PreferredAvailabilityZones) on the latest
+// object after a successful CreateCacheCluster call.
+func (rm *resourceManager) customCreateCacheClusterSetOutput(
+	_ context.Context,
+	r *resource,
+	_ *elasticache.CreateCacheClusterOutput,
+	ko *svcapitypes.CacheCluster,
+) (*svcapitypes.CacheCluster, error) {
+	rm.setAnnotationsFields(r, ko)
+	return ko, nil
+}
+
+// customModifyCacheClusterSetOutput records the spec fields tracked via
+// annotations (the last-requested PreferredAvailabilityZones) on the latest
+// object after a successful ModifyCacheCluster call.
+func (rm *resourceManager) customModifyCacheClusterSetOutput(
+	_ context.Context,
+	r *resource,
+	_ *elasticache.ModifyCacheClusterOutput,
+	ko *svcapitypes.CacheCluster,
+) (*svcapitypes.CacheCluster, error) {
+	rm.setAnnotationsFields(r, ko)
+	return ko, nil
+}
+
+// setAnnotationsFields copies the desired object's annotations, populates any
+// relevant fields, and sets the latest object's annotations to this newly
+// populated map. Fields that are handled by the custom modify implementation
+// are not set here. This should only be called upon a successful create or
+// modify call.
+func (rm *resourceManager) setAnnotationsFields(
+	r *resource,
+	ko *svcapitypes.CacheCluster,
+) {
+	annotations := getAnnotationsFields(r, ko)
+	annotations[AnnotationLastRequestedPAZs] = marshalAsAnnotation(r.ko.Spec.PreferredAvailabilityZones)
+	ko.ObjectMeta.Annotations = annotations
+}
+
+// getAnnotationsFields returns the annotations map that should receive the
+// populated fields: the latest object's existing annotations when present,
+// otherwise a fresh copy of the desired object's annotations. The caller is
+// responsible for assigning the returned map back onto ko.
+func getAnnotationsFields(
+	r *resource,
+	ko *svcapitypes.CacheCluster,
+) map[string]string {
+	if ko.ObjectMeta.Annotations != nil {
+		return ko.ObjectMeta.Annotations
+	}
+	desiredAnnotations := r.ko.ObjectMeta.GetAnnotations()
+	annotations := make(map[string]string, len(desiredAnnotations))
+	for k, v := range desiredAnnotations {
+		annotations[k] = v
+	}
+	return annotations
+}
+
+// marshalAsAnnotation renders val as a JSON string suitable for storage in a
+// Kubernetes annotation. On a marshaling error it deliberately falls back to
+// "null" (the JSON encoding of nil) so that annotation writes stay
+// best-effort and never fail a reconcile.
+func marshalAsAnnotation(val interface{}) string {
+	data, err := json.Marshal(val)
+	if err != nil {
+		return "null"
+	}
+	return string(data)
+}
diff --git a/pkg/resource/cache_cluster/custom_update_input_test.go b/pkg/resource/cache_cluster/custom_update_input_test.go
new file mode 100644
index 00000000..a535948f
--- /dev/null
+++ b/pkg/resource/cache_cluster/custom_update_input_test.go
@@ -0,0 +1,192 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//     http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+ +package cache_cluster + +import ( + "testing" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" + ctrlrtzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +func resourceWithSpec(spec svcapitypes.CacheClusterSpec) *resource { + return newResource(spec, svcapitypes.CacheClusterStatus{}) +} + +func newResource(spec svcapitypes.CacheClusterSpec, status svcapitypes.CacheClusterStatus) *resource { + return &resource{ + ko: &svcapitypes.CacheCluster{ + Spec: spec, + Status: status, + }, + } +} + +func provideResourceManager() *resourceManager { + zapOptions := ctrlrtzap.Options{ + Development: true, + Level: zapcore.InfoLevel, + } + fakeLogger := ctrlrtzap.New(ctrlrtzap.UseFlagOptions(&zapOptions)) + return &resourceManager{ + log: fakeLogger, + metrics: ackmetrics.NewMetrics("elasticache"), + } +} + +func TestCustomUpdateInput(t *testing.T) { + tests := []struct { + description string + desired *resource + latest *resource + makeDelta func() *ackcompare.Delta + + expectedPayload *elasticache.ModifyCacheClusterInput + expectedErr string + }{ + { + description: "no changes", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(1), + }), + latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(1), + }), + makeDelta: ackcompare.NewDelta, + + expectedPayload: &elasticache.ModifyCacheClusterInput{}, + }, + { + description: "increase NumCacheNodes with new PreferredAvailabilityZones", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(3), + PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), + }), + latest: 
resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(1), + }), + makeDelta: func() *ackcompare.Delta { + var delta ackcompare.Delta + delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(1)) + delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), nil) + return &delta + }, + + expectedPayload: &elasticache.ModifyCacheClusterInput{ + NewAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), + }, + }, + { + description: "increase NumCacheNodes again with new PreferredAvailabilityZones", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(5), + PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b", "us-west-2c", "us-west-2b"}), + }), + latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(3), + PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), + }), + makeDelta: func() *ackcompare.Delta { + var delta ackcompare.Delta + delta.Add("Spec.NumCacheNodes", aws.Int64(5), aws.Int64(3)) + delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a", "us-west-2b", "us-west-2c", "us-west-2b"}), + aws.StringSlice([]string{"us-west-2a", "us-west-2b"})) + return &delta + }, + + expectedPayload: &elasticache.ModifyCacheClusterInput{ + NewAvailabilityZones: aws.StringSlice([]string{"us-west-2c", "us-west-2b"}), + }, + }, + { + description: "decrease NumCacheNodes", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(3), + }), + latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(5), + }), + makeDelta: func() *ackcompare.Delta { + var delta ackcompare.Delta + delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(5)) + return &delta + }, + expectedPayload: &elasticache.ModifyCacheClusterInput{ + CacheNodeIdsToRemove: aws.StringSlice([]string{"0005", "0004"}), + }, + }, + { + description: 
"PreferredAvailabilityZones changed with no change in NumCacheNodes", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2c"}), + NumCacheNodes: aws.Int64(3), + }), + latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(3), + }), + makeDelta: func() *ackcompare.Delta { + var delta ackcompare.Delta + delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a"}), nil) + return &delta + }, + expectedErr: "spec.preferredAvailabilityZones can only be changed when new nodes are being added via spec.numCacheNodes", + }, + { + description: "decrease NumCacheNodes when a modification is pending", + desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(3), + }), + latest: newResource(svcapitypes.CacheClusterSpec{ + NumCacheNodes: aws.Int64(5), + PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), + }, svcapitypes.CacheClusterStatus{ + PendingModifiedValues: &svcapitypes.PendingModifiedValues{ + NumCacheNodes: aws.Int64(7), + }, + }), + makeDelta: func() *ackcompare.Delta { + var delta ackcompare.Delta + delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(5)) + return &delta + }, + + expectedPayload: &elasticache.ModifyCacheClusterInput{ + CacheNodeIdsToRemove: aws.StringSlice([]string{"0007", "0006", "0005", "0004"}), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + assert := assert.New(t) + rm := provideResourceManager() + var input elasticache.ModifyCacheClusterInput + err := rm.updateCacheClusterPayload(&input, tt.desired, tt.latest, tt.makeDelta()) + if tt.expectedErr != "" { + assert.NotNil(err) + assert.Contains(err.Error(), tt.expectedErr) + return + } + assert.Nil(err) + assert.Equal(tt.expectedPayload, &input) + }) + } +} diff --git a/pkg/resource/cache_cluster/delta.go b/pkg/resource/cache_cluster/delta.go new file mode 100644 index 
00000000..f2198593 --- /dev/null +++ b/pkg/resource/cache_cluster/delta.go @@ -0,0 +1,256 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... +var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.AZMode, b.ko.Spec.AZMode) { + delta.Add("Spec.AZMode", a.ko.Spec.AZMode, b.ko.Spec.AZMode) + } else if a.ko.Spec.AZMode != nil && b.ko.Spec.AZMode != nil { + if *a.ko.Spec.AZMode != *b.ko.Spec.AZMode { + delta.Add("Spec.AZMode", a.ko.Spec.AZMode, b.ko.Spec.AZMode) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AuthToken, b.ko.Spec.AuthToken) { + delta.Add("Spec.AuthToken", a.ko.Spec.AuthToken, b.ko.Spec.AuthToken) + } else if a.ko.Spec.AuthToken != nil && b.ko.Spec.AuthToken != nil { + if *a.ko.Spec.AuthToken != *b.ko.Spec.AuthToken { + delta.Add("Spec.AuthToken", a.ko.Spec.AuthToken, 
b.ko.Spec.AuthToken) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) { + delta.Add("Spec.AutoMinorVersionUpgrade", a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) + } else if a.ko.Spec.AutoMinorVersionUpgrade != nil && b.ko.Spec.AutoMinorVersionUpgrade != nil { + if *a.ko.Spec.AutoMinorVersionUpgrade != *b.ko.Spec.AutoMinorVersionUpgrade { + delta.Add("Spec.AutoMinorVersionUpgrade", a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) { + delta.Add("Spec.CacheClusterID", a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) + } else if a.ko.Spec.CacheClusterID != nil && b.ko.Spec.CacheClusterID != nil { + if *a.ko.Spec.CacheClusterID != *b.ko.Spec.CacheClusterID { + delta.Add("Spec.CacheClusterID", a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) { + delta.Add("Spec.CacheNodeType", a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) + } else if a.ko.Spec.CacheNodeType != nil && b.ko.Spec.CacheNodeType != nil { + if *a.ko.Spec.CacheNodeType != *b.ko.Spec.CacheNodeType { + delta.Add("Spec.CacheNodeType", a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) { + delta.Add("Spec.CacheParameterGroupName", a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) + } else if a.ko.Spec.CacheParameterGroupName != nil && b.ko.Spec.CacheParameterGroupName != nil { + if *a.ko.Spec.CacheParameterGroupName != *b.ko.Spec.CacheParameterGroupName { + delta.Add("Spec.CacheParameterGroupName", a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) + } + } + if !reflect.DeepEqual(a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) { + 
delta.Add("Spec.CacheParameterGroupRef", a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) + } + if len(a.ko.Spec.CacheSecurityGroupNames) != len(b.ko.Spec.CacheSecurityGroupNames) { + delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } else if len(a.ko.Spec.CacheSecurityGroupNames) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) { + delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) { + delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) + } else if a.ko.Spec.CacheSubnetGroupName != nil && b.ko.Spec.CacheSubnetGroupName != nil { + if *a.ko.Spec.CacheSubnetGroupName != *b.ko.Spec.CacheSubnetGroupName { + delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) + } + } + if !reflect.DeepEqual(a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) { + delta.Add("Spec.CacheSubnetGroupRef", a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.Engine, b.ko.Spec.Engine) { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } else if a.ko.Spec.Engine != nil && b.ko.Spec.Engine != nil { + if *a.ko.Spec.Engine != *b.ko.Spec.Engine { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) { + delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) + } else if a.ko.Spec.EngineVersion != nil && b.ko.Spec.EngineVersion != nil { + if *a.ko.Spec.EngineVersion != *b.ko.Spec.EngineVersion { + delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) + } + } + if 
ackcompare.HasNilDifference(a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } else if a.ko.Spec.IPDiscovery != nil && b.ko.Spec.IPDiscovery != nil { + if *a.ko.Spec.IPDiscovery != *b.ko.Spec.IPDiscovery { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { + if *a.ko.Spec.NetworkType != *b.ko.Spec.NetworkType { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) { + delta.Add("Spec.NotificationTopicARN", a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) + } else if a.ko.Spec.NotificationTopicARN != nil && b.ko.Spec.NotificationTopicARN != nil { + if *a.ko.Spec.NotificationTopicARN != *b.ko.Spec.NotificationTopicARN { + delta.Add("Spec.NotificationTopicARN", a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) + } + } + if !reflect.DeepEqual(a.ko.Spec.NotificationTopicRef, b.ko.Spec.NotificationTopicRef) { + delta.Add("Spec.NotificationTopicRef", a.ko.Spec.NotificationTopicRef, b.ko.Spec.NotificationTopicRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) { + delta.Add("Spec.NumCacheNodes", a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) + } else if a.ko.Spec.NumCacheNodes != nil && b.ko.Spec.NumCacheNodes != nil { + if *a.ko.Spec.NumCacheNodes != *b.ko.Spec.NumCacheNodes { + delta.Add("Spec.NumCacheNodes", a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) { + delta.Add("Spec.OutpostMode", a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) + } else 
if a.ko.Spec.OutpostMode != nil && b.ko.Spec.OutpostMode != nil { + if *a.ko.Spec.OutpostMode != *b.ko.Spec.OutpostMode { + delta.Add("Spec.OutpostMode", a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Port, b.ko.Spec.Port) { + delta.Add("Spec.Port", a.ko.Spec.Port, b.ko.Spec.Port) + } else if a.ko.Spec.Port != nil && b.ko.Spec.Port != nil { + if *a.ko.Spec.Port != *b.ko.Spec.Port { + delta.Add("Spec.Port", a.ko.Spec.Port, b.ko.Spec.Port) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) { + delta.Add("Spec.PreferredAvailabilityZone", a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) + } else if a.ko.Spec.PreferredAvailabilityZone != nil && b.ko.Spec.PreferredAvailabilityZone != nil { + if *a.ko.Spec.PreferredAvailabilityZone != *b.ko.Spec.PreferredAvailabilityZone { + delta.Add("Spec.PreferredAvailabilityZone", a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) { + delta.Add("Spec.PreferredMaintenanceWindow", a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) + } else if a.ko.Spec.PreferredMaintenanceWindow != nil && b.ko.Spec.PreferredMaintenanceWindow != nil { + if *a.ko.Spec.PreferredMaintenanceWindow != *b.ko.Spec.PreferredMaintenanceWindow { + delta.Add("Spec.PreferredMaintenanceWindow", a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) { + delta.Add("Spec.PreferredOutpostARN", a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) + } else if a.ko.Spec.PreferredOutpostARN != nil && b.ko.Spec.PreferredOutpostARN != nil { + if *a.ko.Spec.PreferredOutpostARN != *b.ko.Spec.PreferredOutpostARN { + 
delta.Add("Spec.PreferredOutpostARN", a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) + } + } + if len(a.ko.Spec.PreferredOutpostARNs) != len(b.ko.Spec.PreferredOutpostARNs) { + delta.Add("Spec.PreferredOutpostARNs", a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) + } else if len(a.ko.Spec.PreferredOutpostARNs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) { + delta.Add("Spec.PreferredOutpostARNs", a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) { + delta.Add("Spec.ReplicationGroupID", a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) + } else if a.ko.Spec.ReplicationGroupID != nil && b.ko.Spec.ReplicationGroupID != nil { + if *a.ko.Spec.ReplicationGroupID != *b.ko.Spec.ReplicationGroupID { + delta.Add("Spec.ReplicationGroupID", a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) + } + } + if !reflect.DeepEqual(a.ko.Spec.ReplicationGroupRef, b.ko.Spec.ReplicationGroupRef) { + delta.Add("Spec.ReplicationGroupRef", a.ko.Spec.ReplicationGroupRef, b.ko.Spec.ReplicationGroupRef) + } + if len(a.ko.Spec.SecurityGroupIDs) != len(b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } else if len(a.ko.Spec.SecurityGroupIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } + } + if len(a.ko.Spec.SnapshotARNs) != len(b.ko.Spec.SnapshotARNs) { + delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } else if len(a.ko.Spec.SnapshotARNs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) { + delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } + } + if 
ackcompare.HasNilDifference(a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) { + delta.Add("Spec.SnapshotName", a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) + } else if a.ko.Spec.SnapshotName != nil && b.ko.Spec.SnapshotName != nil { + if *a.ko.Spec.SnapshotName != *b.ko.Spec.SnapshotName { + delta.Add("Spec.SnapshotName", a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) + } + } + if !reflect.DeepEqual(a.ko.Spec.SnapshotRef, b.ko.Spec.SnapshotRef) { + delta.Add("Spec.SnapshotRef", a.ko.Spec.SnapshotRef, b.ko.Spec.SnapshotRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } else if a.ko.Spec.SnapshotRetentionLimit != nil && b.ko.Spec.SnapshotRetentionLimit != nil { + if *a.ko.Spec.SnapshotRetentionLimit != *b.ko.Spec.SnapshotRetentionLimit { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) { + delta.Add("Spec.SnapshotWindow", a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) + } else if a.ko.Spec.SnapshotWindow != nil && b.ko.Spec.SnapshotWindow != nil { + if *a.ko.Spec.SnapshotWindow != *b.ko.Spec.SnapshotWindow { + delta.Add("Spec.SnapshotWindow", a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) + } + } + if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + if ackcompare.HasNilDifference(a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) { + delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) + } else if a.ko.Spec.TransitEncryptionEnabled != nil && b.ko.Spec.TransitEncryptionEnabled != nil { + if *a.ko.Spec.TransitEncryptionEnabled != 
*b.ko.Spec.TransitEncryptionEnabled { + delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) + } + } + + modifyDelta(delta, a, b) + return delta +} diff --git a/pkg/resource/cache_cluster/delta_util.go b/pkg/resource/cache_cluster/delta_util.go new file mode 100644 index 00000000..12a84867 --- /dev/null +++ b/pkg/resource/cache_cluster/delta_util.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cache_cluster + +import ( + "encoding/json" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" +) + +// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary. +func modifyDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + if delta.DifferentAt("Spec.EngineVersion") && desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil && + util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { + common.RemoveFromDelta(delta, "Spec.EngineVersion") + // TODO: handle the case of a nil difference (especially when desired EV is nil) + } + + // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken. 
+ if delta.DifferentAt("Spec.PreferredMaintenanceWindow") && desired.ko.Spec.PreferredMaintenanceWindow == nil && + latest.ko.Spec.PreferredMaintenanceWindow != nil { + common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") + } + + if delta.DifferentAt("Spec.PreferredAvailabilityZone") && desired.ko.Spec.PreferredAvailabilityZone == nil && + latest.ko.Spec.PreferredAvailabilityZone != nil { + common.RemoveFromDelta(delta, "Spec.PreferredAvailabilityZone") + } + + updatePAZsDelta(desired, delta) +} + +// updatePAZsDelta retrieves the last requested configurations saved in annotations and compares them +// to the current desired configurations. If a diff is found, it adds it to delta. +func updatePAZsDelta(desired *resource, delta *ackcompare.Delta) { + var lastRequestedPAZs []*string + unmarshalAnnotation(desired, AnnotationLastRequestedPAZs, &lastRequestedPAZs) + if !reflect.DeepEqual(desired.ko.Spec.PreferredAvailabilityZones, lastRequestedPAZs) { + delta.Add("Spec.PreferredAvailabilityZones", desired.ko.Spec.PreferredAvailabilityZones, + lastRequestedPAZs) + } +} + +func unmarshalAnnotation(desired *resource, annotation string, val interface{}) { + if data, ok := desired.ko.ObjectMeta.GetAnnotations()[annotation]; ok { + _ = json.Unmarshal([]byte(data), val) + } +} diff --git a/pkg/resource/cache_cluster/descriptor.go b/pkg/resource/cache_cluster/descriptor.go new file mode 100644 index 00000000..0ef12b91 --- /dev/null +++ b/pkg/resource/cache_cluster/descriptor.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +const ( + finalizerString = "finalizers.elasticache.services.k8s.aws/CacheCluster" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("cacheclusters") + GroupKind = metav1.GroupKind{ + Group: "elasticache.services.k8s.aws", + Kind: "CacheCluster", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.CacheCluster{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.CacheCluster), + } +} + +// Delta returns an `ackcompare.Delta` 
object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) + return containsFinalizer(obj, finalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. 
If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, finalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. +func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, finalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/cache_cluster/hooks.go b/pkg/resource/cache_cluster/hooks.go new file mode 100644 index 00000000..b0afced3 --- /dev/null +++ b/pkg/resource/cache_cluster/hooks.go @@ -0,0 +1,133 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package cache_cluster + +import ( + "context" + "errors" + "fmt" + "slices" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" +) + +const ( + statusCreating = "creating" + statusAvailable = "available" + statusModifying = "modifying" + statusDeleting = "deleting" +) + +const ( + // AnnotationLastRequestedPAZs is an annotation whose value is a JSON representation of []*string, + // passed in as input to either the create or modify API called most recently. + AnnotationLastRequestedPAZs = svcapitypes.AnnotationPrefix + "last-requested-preferred-availability-zones" +) + +var ( + condMsgCurrentlyDeleting = "CacheCluster is currently being deleted" + condMsgNoDeleteWhileModifying = "Cannot delete CacheCluster while it is being modified" + condMsgCurrentlyUpdating = "CacheCluster is currently being updated" +) + +var ( + requeueWaitWhileDeleting = ackrequeue.NeededAfter( + fmt.Errorf("CacheCluster is in %q state, it cannot be deleted", statusDeleting), + ackrequeue.DefaultRequeueAfterDuration, + ) + requeueWaitWhileModifying = ackrequeue.NeededAfter( + fmt.Errorf("CacheCluster is in %q state, it cannot be modified", statusModifying), + ackrequeue.DefaultRequeueAfterDuration, + ) +) + +func hasStatus(r *resource, status string) bool { + return r.ko.Status.CacheClusterStatus != nil && *r.ko.Status.CacheClusterStatus == status +} + +func isCreating(r *resource) bool { + return hasStatus(r, statusCreating) +} + +func isAvailable(r *resource) bool { + return hasStatus(r, statusAvailable) +} + +func isDeleting(r *resource) bool { + return hasStatus(r, statusDeleting) +} + +func 
isModifying(r *resource) bool { + return hasStatus(r, statusModifying) +} + +// getTags retrieves the resource's associated tags. +func (rm *resourceManager) getTags( + ctx context.Context, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) +} + +// syncTags keeps the resource's tags in sync. +func (rm *resourceManager) syncTags( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, ToACKTags, rm.sdkapi, rm.metrics) +} + +func (rm *resourceManager) updateCacheClusterPayload(input *svcsdk.ModifyCacheClusterInput, desired, latest *resource, delta *ackcompare.Delta) error { + desiredSpec := desired.ko.Spec + var nodesDelta int64 + if delta.DifferentAt("Spec.NumCacheNodes") && desired.ko.Spec.NumCacheNodes != nil { + numNodes := *latest.ko.Spec.NumCacheNodes + if pendingModifications := latest.ko.Status.PendingModifiedValues; pendingModifications != nil && + pendingModifications.NumCacheNodes != nil && *pendingModifications.NumCacheNodes > numNodes { + numNodes = *pendingModifications.NumCacheNodes + } + nodesDelta = numNodes - *desired.ko.Spec.NumCacheNodes + if nodesDelta > 0 { + for i := numNodes; i > numNodes-nodesDelta; i-- { + nodeID := fmt.Sprintf("%04d", i) + input.CacheNodeIdsToRemove = append(input.CacheNodeIdsToRemove, &nodeID) + } + } + } + + if idx := slices.IndexFunc(delta.Differences, func(diff *ackcompare.Difference) bool { + return diff.Path.Contains("Spec.PreferredAvailabilityZones") + }); idx != -1 && desired.ko.Spec.PreferredAvailabilityZones != nil { + if nodesDelta >= 0 { + return errors.New("spec.preferredAvailabilityZones can only be changed when new nodes are being added via spec.numCacheNodes") + } + + oldAZsLen := 0 + oldValues, ok := delta.Differences[idx].B.([]*string) + if ok { + oldAZsLen = len(oldValues) + } + if 
len(desiredSpec.PreferredAvailabilityZones) <= oldAZsLen { + return errors.New("newly specified AZs in spec.preferredAvailabilityZones must match the number of cache nodes being added") + } + input.NewAvailabilityZones = desiredSpec.PreferredAvailabilityZones[oldAZsLen:] + } + return nil +} diff --git a/pkg/resource/cache_cluster/identifiers.go b/pkg/resource/cache_cluster/identifiers.go new file mode 100644 index 00000000..4735c72f --- /dev/null +++ b/pkg/resource/cache_cluster/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. 
+func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/cache_cluster/manager.go b/pkg/resource/cache_cluster/manager.go new file mode 100644 index 00000000..9ce0dd7f --- /dev/null +++ b/pkg/resource/cache_cluster/manager.go @@ -0,0 +1,369 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package cache_cluster + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go/aws/session" + svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.CacheCluster{} +) + +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=cacheclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=cacheclusters/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{"PreferredAvailabilityZone"} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for Book custom resources. 
+type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // The AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sess is the AWS SDK Session object used to communicate with the backend + // AWS service API + sess *session.Session + // sdk is a pointer to the AWS service API interface exposed by the + // aws-sdk-go/services/{alias}/{alias}iface package. + sdkapi svcsdkapi.ElastiCacheAPI +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. +func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. 
+ panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note for specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. The +// higher-level reonciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. 
+ panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. This +// is useful for constructing ARNs for APIs that require ARNs in their +// GetAttributes operations but all we have (for new CRs at least) is a +// name for the resource +func (rm *resourceManager) ARNFromName(name string) string { + return fmt.Sprintf( + "arn:aws:elasticache:%s:%s:%s", + rm.awsRegion, + rm.awsAccountID, + name, + ) +} + +// LateInitialize returns an acktypes.AWSResource after setting the late initialized +// fields from the readOne call. This method will initialize the optional fields +// which were not provided by the k8s user but were defaulted by the AWS service. +// If there are no such fields to be initialized, the returned object is similar to +// object passed in the parameter. 
+func (rm *resourceManager) LateInitialize( + ctx context.Context, + latest acktypes.AWSResource, +) (acktypes.AWSResource, error) { + rlog := ackrtlog.FromContext(ctx) + // If there are no fields to late initialize, do nothing + if len(lateInitializeFieldNames) == 0 { + rlog.Debug("no late initialization required.") + return latest, nil + } + latestCopy := latest.DeepCopy() + lateInitConditionReason := "" + lateInitConditionMessage := "" + observed, err := rm.ReadOne(ctx, latestCopy) + if err != nil { + lateInitConditionMessage = "Unable to complete Read operation required for late initialization" + lateInitConditionReason = "Late Initialization Failure" + ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil) + return latestCopy, err + } + lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy) + incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes) + if incompleteInitialization { + // Add the condition with LateInitialized=False + lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds" + lateInitConditionReason = "Delayed Late Initialization" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil) + return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second) + } + // Set LateInitialized condition to True + lateInitConditionMessage = "Late initialization successful" + lateInitConditionReason = "Late initialization successful" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason) + return lateInitializedRes, nil +} + +// incompleteLateInitialization return true if there are fields which were 
supposed to be +// late initialized but are not. If all the fields are late initialized, false is returned +func (rm *resourceManager) incompleteLateInitialization( + res acktypes.AWSResource, +) bool { + ko := rm.concreteResource(res).ko.DeepCopy() + if ko.Spec.PreferredAvailabilityZone == nil { + return true + } + return false +} + +// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed' +// resource and returns 'latest' resource +func (rm *resourceManager) lateInitializeFromReadOneOutput( + observed acktypes.AWSResource, + latest acktypes.AWSResource, +) acktypes.AWSResource { + observedKo := rm.concreteResource(observed).ko.DeepCopy() + latestKo := rm.concreteResource(latest).ko.DeepCopy() + if observedKo.Spec.PreferredAvailabilityZone != nil && latestKo.Spec.PreferredAvailabilityZone == nil { + latestKo.Spec.PreferredAvailabilityZone = observedKo.Spec.PreferredAvailabilityZone + } + return &resource{latestKo} +} + +// IsSynced returns true if the resource is synced. +func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's IsSynced() method received resource with nil CR object") + } + + return true, nil +} + +// EnsureTags ensures that tags are present inside the AWSResource. +// If the AWSResource does not have any existing resource tags, the 'tags' +// field is initialized and the controller tags are added. +// If the AWSResource has existing resource tags, then controller tags are +// added to the existing resource tags without overriding them. +// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. 
+func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's EnsureTags method received resource with nil CR object") + } + defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags := ToACKTags(existingTags) + tags := acktags.Merge(resourceTags, defaultTags) + r.ko.Spec.Tags = FromACKTags(tags) + return nil +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +func newResourceManager( + cfg ackcfg.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + sess *session.Session, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sess: sess, + sdkapi: svcsdk.New(sess), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. +func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. 
+func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/cache_cluster/manager_factory.go b/pkg/resource/cache_cluster/manager_factory.go new file mode 100644 index 00000000..1c63020a --- /dev/null +++ b/pkg/resource/cache_cluster/manager_factory.go @@ -0,0 +1,96 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. 
+type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + sess *session.Session, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (acktypes.AWSResourceManager, error) { + rmId := fmt.Sprintf("%s/%s", id, region) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. +func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/cache_cluster/references.go b/pkg/resource/cache_cluster/references.go new file mode 100644 index 00000000..0c0b666b --- /dev/null +++ b/pkg/resource/cache_cluster/references.go @@ -0,0 +1,526 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + snsapitypes "github.com/aws-controllers-k8s/sns-controller/apis/v1alpha1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// +kubebuilder:rbac:groups=sns.services.k8s.aws,resources=topics,verbs=get;list +// +kubebuilder:rbac:groups=sns.services.k8s.aws,resources=topics/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. 
+func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if ko.Spec.CacheParameterGroupRef != nil { + ko.Spec.CacheParameterGroupName = nil + } + + if ko.Spec.CacheSubnetGroupRef != nil { + ko.Spec.CacheSubnetGroupName = nil + } + + if ko.Spec.NotificationTopicRef != nil { + ko.Spec.NotificationTopicARN = nil + } + + if ko.Spec.ReplicationGroupRef != nil { + ko.Spec.ReplicationGroupID = nil + } + + if ko.Spec.SnapshotRef != nil { + ko.Spec.SnapshotName = nil + } + + return &resource{ko} +} + +// ResolveReferences finds if there are any Reference field(s) present +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
+func (rm *resourceManager) ResolveReferences( + ctx context.Context, + apiReader client.Reader, + res acktypes.AWSResource, +) (acktypes.AWSResource, bool, error) { + namespace := res.MetaObject().GetNamespace() + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, namespace, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, namespace, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForNotificationTopicARN(ctx, apiReader, namespace, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForReplicationGroupID(ctx, apiReader, namespace, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForSnapshotName(ctx, apiReader, namespace, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err +} + +// validateReferenceFields validates the reference field and corresponding +// identifier field. 
+func validateReferenceFields(ko *svcapitypes.CacheCluster) error { + + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheParameterGroupName", "CacheParameterGroupRef") + } + + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheSubnetGroupName", "CacheSubnetGroupRef") + } + + if ko.Spec.NotificationTopicRef != nil && ko.Spec.NotificationTopicARN != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("NotificationTopicARN", "NotificationTopicRef") + } + + if ko.Spec.ReplicationGroupRef != nil && ko.Spec.ReplicationGroupID != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("ReplicationGroupID", "ReplicationGroupRef") + } + + if ko.Spec.SnapshotRef != nil && ko.Spec.SnapshotName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SnapshotName", "SnapshotRef") + } + return nil +} + +// resolveReferenceForCacheParameterGroupName reads the resource referenced +// from CacheParameterGroupRef field and sets the CacheParameterGroupName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( + ctx context.Context, + apiReader client.Reader, + namespace string, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheParameterGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheParameterGroupRef") + } + obj := &svcapitypes.CacheParameterGroup{} + if err := getReferencedResourceState_CacheParameterGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheParameterGroupName = (*string)(obj.Spec.CacheParameterGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheParameterGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_CacheParameterGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheParameterGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceSynced, refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheParameterGroup", + namespace, name) + } + if obj.Spec.CacheParameterGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheParameterGroup", + namespace, name, + "Spec.CacheParameterGroupName") + } + return nil +} + +// resolveReferenceForCacheSubnetGroupName reads the resource referenced +// from CacheSubnetGroupRef field and sets the CacheSubnetGroupName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( + ctx context.Context, + apiReader client.Reader, + namespace string, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheSubnetGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheSubnetGroupRef") + } + obj := &svcapitypes.CacheSubnetGroup{} + if err := getReferencedResourceState_CacheSubnetGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheSubnetGroupName = (*string)(obj.Spec.CacheSubnetGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheSubnetGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_CacheSubnetGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheSubnetGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceSynced, refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheSubnetGroup", + namespace, name) + } + if obj.Spec.CacheSubnetGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheSubnetGroup", + namespace, name, + "Spec.CacheSubnetGroupName") + } + return nil +} + +// resolveReferenceForNotificationTopicARN reads the resource referenced +// from NotificationTopicRef field and sets the NotificationTopicARN +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForNotificationTopicARN( + ctx context.Context, + apiReader client.Reader, + namespace string, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.NotificationTopicRef != nil && ko.Spec.NotificationTopicRef.From != nil { + hasReferences = true + arr := ko.Spec.NotificationTopicRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: NotificationTopicRef") + } + obj := &snsapitypes.Topic{} + if err := getReferencedResourceState_Topic(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.NotificationTopicARN = (*string)(obj.Status.ACKResourceMetadata.ARN) + } + + return hasReferences, nil +} + +// getReferencedResourceState_Topic looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_Topic( + ctx context.Context, + apiReader client.Reader, + obj *snsapitypes.Topic, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceSynced, refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Topic", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Topic", + namespace, name) + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Topic", + namespace, name) + } + if obj.Status.ACKResourceMetadata == nil || obj.Status.ACKResourceMetadata.ARN == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Topic", + namespace, name, + "Status.ACKResourceMetadata.ARN") + } + return nil +} + +// resolveReferenceForReplicationGroupID reads the resource referenced +// from ReplicationGroupRef field and sets the ReplicationGroupID +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForReplicationGroupID( + ctx context.Context, + apiReader client.Reader, + namespace string, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.ReplicationGroupRef != nil && ko.Spec.ReplicationGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.ReplicationGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: ReplicationGroupRef") + } + obj := &svcapitypes.ReplicationGroup{} + if err := getReferencedResourceState_ReplicationGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.ReplicationGroupID = (*string)(obj.Spec.ReplicationGroupID) + } + + return hasReferences, nil +} + +// getReferencedResourceState_ReplicationGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_ReplicationGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.ReplicationGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceSynced, refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "ReplicationGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "ReplicationGroup", + namespace, name) + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "ReplicationGroup", + namespace, name) + } + if obj.Spec.ReplicationGroupID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "ReplicationGroup", + namespace, name, + "Spec.ReplicationGroupID") + } + return nil +} + +// resolveReferenceForSnapshotName reads the resource referenced +// from SnapshotRef field and sets the SnapshotName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSnapshotName( + ctx context.Context, + apiReader client.Reader, + namespace string, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.SnapshotRef != nil && ko.Spec.SnapshotRef.From != nil { + hasReferences = true + arr := ko.Spec.SnapshotRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SnapshotRef") + } + obj := &svcapitypes.Snapshot{} + if err := getReferencedResourceState_Snapshot(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.SnapshotName = (*string)(obj.Spec.SnapshotName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_Snapshot looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_Snapshot( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.Snapshot, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceSynced, refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Snapshot", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Snapshot", + namespace, name) + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Snapshot", + namespace, name) + } + if obj.Spec.SnapshotName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Snapshot", + namespace, name, + "Spec.SnapshotName") + } + return nil +} diff --git a/pkg/resource/cache_cluster/resource.go b/pkg/resource/cache_cluster/resource.go new file mode 100644 index 00000000..f3ff87d3 --- /dev/null +++ b/pkg/resource/cache_cluster/resource.go @@ -0,0 +1,100 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
// See the License for the specific language governing
// permissions and limitations under the License.

// Code generated by ack-generate. DO NOT EDIT.

package cache_cluster

import (
	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
	ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors"
	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rtclient "sigs.k8s.io/controller-runtime/pkg/client"

	svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
)

// Hack to avoid import errors during build...
// (keeps the ackerrors import referenced even if no generated code below
// happens to use it)
var (
	_ = &ackerrors.MissingNameIdentifier
)

// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource`
// interface
type resource struct {
	// The Kubernetes-native CR representing the resource
	ko *svcapitypes.CacheCluster
}

// Identifiers returns an AWSResourceIdentifiers object containing various
// identifying information, including the AWS account ID that owns the
// resource, the resource's AWS Resource Name (ARN)
func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers {
	return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata}
}

// IsBeingDeleted returns true if the Kubernetes resource has a non-zero
// deletion timestamp
func (r *resource) IsBeingDeleted() bool {
	return !r.ko.DeletionTimestamp.IsZero()
}

// RuntimeObject returns the Kubernetes apimachinery/runtime representation of
// the AWSResource
func (r *resource) RuntimeObject() rtclient.Object {
	return r.ko
}

// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object
// representation of the AWSResource
func (r *resource) MetaObject() metav1.Object {
	return r.ko.GetObjectMeta()
}

// Conditions returns the ACK Conditions collection for the AWSResource
func (r *resource) Conditions() []*ackv1alpha1.Condition {
	return r.ko.Status.Conditions
}

// ReplaceConditions sets the Conditions
// status field for the resource
func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) {
	r.ko.Status.Conditions = conditions
}

// SetObjectMeta sets the ObjectMeta field for the resource
func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) {
	r.ko.ObjectMeta = meta
}

// SetStatus will set the Status field for the resource
func (r *resource) SetStatus(desired acktypes.AWSResource) {
	r.ko.Status = desired.(*resource).ko.Status
}

// SetIdentifiers sets the Spec or Status field that is referenced as the unique
// resource identifier
// (for CacheCluster the identifier is Spec.CacheClusterID)
func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error {
	if identifier.NameOrID == "" {
		return ackerrors.MissingNameIdentifier
	}
	r.ko.Spec.CacheClusterID = &identifier.NameOrID

	return nil
}

// DeepCopy will return a copy of the resource
func (r *resource) DeepCopy() acktypes.AWSResource {
	koCopy := r.ko.DeepCopy()
	return &resource{koCopy}
}
diff --git a/pkg/resource/cache_cluster/sdk.go b/pkg/resource/cache_cluster/sdk.go
new file mode 100644
index 00000000..edb94fbe
--- /dev/null
+++ b/pkg/resource/cache_cluster/sdk.go
@@ -0,0 +1,1611 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//     http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+// Code generated by ack-generate. DO NOT EDIT.
package cache_cluster

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"strings"

	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
	ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare"
	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
	ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue"
	ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log"
	"github.com/aws/aws-sdk-go/aws"
	svcsdk "github.com/aws/aws-sdk-go/service/elasticache"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
)

// Hack to avoid import errors during build...
// (blank references keep every import alive regardless of which generated
// code paths below actually use it)
var (
	_ = &metav1.Time{}
	_ = strings.ToLower("")
	_ = &aws.JSONValue{}
	_ = &svcsdk.ElastiCache{}
	_ = &svcapitypes.CacheCluster{}
	_ = ackv1alpha1.AWSAccountID("")
	_ = &ackerr.NotFound
	_ = &ackcondition.NotManagedMessage
	_ = &reflect.Value{}
	_ = fmt.Sprintf("")
	_ = &ackrequeue.NoRequeue{}
)

// sdkFind returns SDK-specific information about a supplied resource
//
// It calls DescribeCacheClusters keyed by Spec.CacheClusterID, merges the
// first returned cluster into a deep copy of the desired object's Spec and
// Status field-by-field, then sets the ResourceSynced condition and (when
// available) reads the resource's tags.
func (rm *resourceManager) sdkFind(
	ctx context.Context,
	r *resource,
) (latest *resource, err error) {
	rlog := ackrtlog.FromContext(ctx)
	exit := rlog.Trace("rm.sdkFind")
	defer func() {
		exit(err)
	}()
	// If any required fields in the input shape are missing, AWS resource is
	// not created yet. Return NotFound here to indicate to callers that the
	// resource isn't yet created.
	if rm.requiredFieldsMissingFromReadManyInput(r) {
		return nil, ackerr.NotFound
	}

	input, err := rm.newListRequestPayload(r)
	if err != nil {
		return nil, err
	}
	var resp *svcsdk.DescribeCacheClustersOutput
	resp, err = rm.sdkapi.DescribeCacheClustersWithContext(ctx, input)
	rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", err)
	if err != nil {
		// The service's "not found" error code is translated into ACK's
		// NotFound sentinel so the runtime treats it as "create needed".
		if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheClusterNotFound" {
			return nil, ackerr.NotFound
		}
		return nil, err
	}

	// Merge in the information we read from the API call above to the copy of
	// the original Kubernetes object we passed to the function
	ko := r.ko.DeepCopy()

	// The loop breaks after the first element, so only the first cluster in
	// the response is merged. Every field follows the same pattern: copy the
	// SDK value when present, otherwise reset the K8s field to nil.
	found := false
	for _, elem := range resp.CacheClusters {
		if elem.ARN != nil {
			if ko.Status.ACKResourceMetadata == nil {
				ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{}
			}
			tmpARN := ackv1alpha1.AWSResourceName(*elem.ARN)
			ko.Status.ACKResourceMetadata.ARN = &tmpARN
		}
		if elem.AtRestEncryptionEnabled != nil {
			ko.Status.AtRestEncryptionEnabled = elem.AtRestEncryptionEnabled
		} else {
			ko.Status.AtRestEncryptionEnabled = nil
		}
		if elem.AuthTokenEnabled != nil {
			ko.Status.AuthTokenEnabled = elem.AuthTokenEnabled
		} else {
			ko.Status.AuthTokenEnabled = nil
		}
		if elem.AuthTokenLastModifiedDate != nil {
			ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*elem.AuthTokenLastModifiedDate}
		} else {
			ko.Status.AuthTokenLastModifiedDate = nil
		}
		if elem.AutoMinorVersionUpgrade != nil {
			ko.Spec.AutoMinorVersionUpgrade = elem.AutoMinorVersionUpgrade
		} else {
			ko.Spec.AutoMinorVersionUpgrade = nil
		}
		if elem.CacheClusterCreateTime != nil {
			ko.Status.CacheClusterCreateTime = &metav1.Time{*elem.CacheClusterCreateTime}
		} else {
			ko.Status.CacheClusterCreateTime = nil
		}
		if elem.CacheClusterId != nil {
			ko.Spec.CacheClusterID = elem.CacheClusterId
		} else {
			ko.Spec.CacheClusterID = nil
		}
		if elem.CacheClusterStatus != nil {
			ko.Status.CacheClusterStatus = elem.CacheClusterStatus
		} else {
			ko.Status.CacheClusterStatus = nil
		}
		if elem.CacheNodeType != nil {
			ko.Spec.CacheNodeType = elem.CacheNodeType
		} else {
			ko.Spec.CacheNodeType = nil
		}
		if elem.CacheNodes != nil {
			f9 := []*svcapitypes.CacheNode{}
			for _, f9iter := range elem.CacheNodes {
				f9elem := &svcapitypes.CacheNode{}
				if f9iter.CacheNodeCreateTime != nil {
					f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime}
				}
				if f9iter.CacheNodeId != nil {
					f9elem.CacheNodeID = f9iter.CacheNodeId
				}
				if f9iter.CacheNodeStatus != nil {
					f9elem.CacheNodeStatus = f9iter.CacheNodeStatus
				}
				if f9iter.CustomerAvailabilityZone != nil {
					f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone
				}
				if f9iter.CustomerOutpostArn != nil {
					f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn
				}
				if f9iter.Endpoint != nil {
					f9elemf5 := &svcapitypes.Endpoint{}
					if f9iter.Endpoint.Address != nil {
						f9elemf5.Address = f9iter.Endpoint.Address
					}
					if f9iter.Endpoint.Port != nil {
						f9elemf5.Port = f9iter.Endpoint.Port
					}
					f9elem.Endpoint = f9elemf5
				}
				if f9iter.ParameterGroupStatus != nil {
					f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus
				}
				if f9iter.SourceCacheNodeId != nil {
					f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId
				}
				f9 = append(f9, f9elem)
			}
			ko.Status.CacheNodes = f9
		} else {
			ko.Status.CacheNodes = nil
		}
		if elem.CacheParameterGroup != nil {
			f10 := &svcapitypes.CacheParameterGroupStatus_SDK{}
			if elem.CacheParameterGroup.CacheNodeIdsToReboot != nil {
				f10f0 := []*string{}
				for _, f10f0iter := range elem.CacheParameterGroup.CacheNodeIdsToReboot {
					var f10f0elem string
					f10f0elem = *f10f0iter
					f10f0 = append(f10f0, &f10f0elem)
				}
				f10.CacheNodeIDsToReboot = f10f0
			}
			if elem.CacheParameterGroup.CacheParameterGroupName != nil {
				f10.CacheParameterGroupName = elem.CacheParameterGroup.CacheParameterGroupName
			}
			if elem.CacheParameterGroup.ParameterApplyStatus != nil {
				f10.ParameterApplyStatus = elem.CacheParameterGroup.ParameterApplyStatus
			}
			ko.Status.CacheParameterGroup = f10
		} else {
			ko.Status.CacheParameterGroup = nil
		}
		if elem.CacheSecurityGroups != nil {
			f11 := []*svcapitypes.CacheSecurityGroupMembership{}
			for _, f11iter := range elem.CacheSecurityGroups {
				f11elem := &svcapitypes.CacheSecurityGroupMembership{}
				if f11iter.CacheSecurityGroupName != nil {
					f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName
				}
				if f11iter.Status != nil {
					f11elem.Status = f11iter.Status
				}
				f11 = append(f11, f11elem)
			}
			ko.Status.CacheSecurityGroups = f11
		} else {
			ko.Status.CacheSecurityGroups = nil
		}
		if elem.CacheSubnetGroupName != nil {
			ko.Spec.CacheSubnetGroupName = elem.CacheSubnetGroupName
		} else {
			ko.Spec.CacheSubnetGroupName = nil
		}
		if elem.ClientDownloadLandingPage != nil {
			ko.Status.ClientDownloadLandingPage = elem.ClientDownloadLandingPage
		} else {
			ko.Status.ClientDownloadLandingPage = nil
		}
		if elem.ConfigurationEndpoint != nil {
			f14 := &svcapitypes.Endpoint{}
			if elem.ConfigurationEndpoint.Address != nil {
				f14.Address = elem.ConfigurationEndpoint.Address
			}
			if elem.ConfigurationEndpoint.Port != nil {
				f14.Port = elem.ConfigurationEndpoint.Port
			}
			ko.Status.ConfigurationEndpoint = f14
		} else {
			ko.Status.ConfigurationEndpoint = nil
		}
		if elem.Engine != nil {
			ko.Spec.Engine = elem.Engine
		} else {
			ko.Spec.Engine = nil
		}
		if elem.EngineVersion != nil {
			ko.Spec.EngineVersion = elem.EngineVersion
		} else {
			ko.Spec.EngineVersion = nil
		}
		if elem.IpDiscovery != nil {
			ko.Spec.IPDiscovery = elem.IpDiscovery
		} else {
			ko.Spec.IPDiscovery = nil
		}
		if elem.NetworkType != nil {
			ko.Spec.NetworkType = elem.NetworkType
		} else {
			ko.Spec.NetworkType = nil
		}
		if elem.NotificationConfiguration != nil {
			f19 := &svcapitypes.NotificationConfiguration{}
			if elem.NotificationConfiguration.TopicArn != nil {
				f19.TopicARN = elem.NotificationConfiguration.TopicArn
			}
			if elem.NotificationConfiguration.TopicStatus != nil {
				f19.TopicStatus = elem.NotificationConfiguration.TopicStatus
			}
			ko.Status.NotificationConfiguration = f19
		} else {
			ko.Status.NotificationConfiguration = nil
		}
		if elem.NumCacheNodes != nil {
			ko.Spec.NumCacheNodes = elem.NumCacheNodes
		} else {
			ko.Spec.NumCacheNodes = nil
		}
		if elem.PendingModifiedValues != nil {
			f21 := &svcapitypes.PendingModifiedValues{}
			if elem.PendingModifiedValues.AuthTokenStatus != nil {
				f21.AuthTokenStatus = elem.PendingModifiedValues.AuthTokenStatus
			}
			if elem.PendingModifiedValues.CacheNodeIdsToRemove != nil {
				f21f1 := []*string{}
				for _, f21f1iter := range elem.PendingModifiedValues.CacheNodeIdsToRemove {
					var f21f1elem string
					f21f1elem = *f21f1iter
					f21f1 = append(f21f1, &f21f1elem)
				}
				f21.CacheNodeIDsToRemove = f21f1
			}
			if elem.PendingModifiedValues.CacheNodeType != nil {
				f21.CacheNodeType = elem.PendingModifiedValues.CacheNodeType
			}
			if elem.PendingModifiedValues.EngineVersion != nil {
				f21.EngineVersion = elem.PendingModifiedValues.EngineVersion
			}
			if elem.PendingModifiedValues.NumCacheNodes != nil {
				f21.NumCacheNodes = elem.PendingModifiedValues.NumCacheNodes
			}
			if elem.PendingModifiedValues.TransitEncryptionEnabled != nil {
				f21.TransitEncryptionEnabled = elem.PendingModifiedValues.TransitEncryptionEnabled
			}
			if elem.PendingModifiedValues.TransitEncryptionMode != nil {
				f21.TransitEncryptionMode = elem.PendingModifiedValues.TransitEncryptionMode
			}
			ko.Status.PendingModifiedValues = f21
		} else {
			ko.Status.PendingModifiedValues = nil
		}
		if elem.PreferredAvailabilityZone != nil {
			ko.Spec.PreferredAvailabilityZone = elem.PreferredAvailabilityZone
		} else {
			ko.Spec.PreferredAvailabilityZone = nil
		}
		if elem.PreferredMaintenanceWindow != nil {
			ko.Spec.PreferredMaintenanceWindow = elem.PreferredMaintenanceWindow
		} else {
			ko.Spec.PreferredMaintenanceWindow = nil
		}
		if elem.PreferredOutpostArn != nil {
			ko.Spec.PreferredOutpostARN = elem.PreferredOutpostArn
		} else {
			ko.Spec.PreferredOutpostARN = nil
		}
		if elem.ReplicationGroupId != nil {
			ko.Spec.ReplicationGroupID = elem.ReplicationGroupId
		} else {
			ko.Spec.ReplicationGroupID = nil
		}
		if elem.ReplicationGroupLogDeliveryEnabled != nil {
			ko.Status.ReplicationGroupLogDeliveryEnabled = elem.ReplicationGroupLogDeliveryEnabled
		} else {
			ko.Status.ReplicationGroupLogDeliveryEnabled = nil
		}
		if elem.SecurityGroups != nil {
			f27 := []*svcapitypes.SecurityGroupMembership{}
			for _, f27iter := range elem.SecurityGroups {
				f27elem := &svcapitypes.SecurityGroupMembership{}
				if f27iter.SecurityGroupId != nil {
					f27elem.SecurityGroupID = f27iter.SecurityGroupId
				}
				if f27iter.Status != nil {
					f27elem.Status = f27iter.Status
				}
				f27 = append(f27, f27elem)
			}
			ko.Status.SecurityGroups = f27
		} else {
			ko.Status.SecurityGroups = nil
		}
		if elem.SnapshotRetentionLimit != nil {
			ko.Spec.SnapshotRetentionLimit = elem.SnapshotRetentionLimit
		} else {
			ko.Spec.SnapshotRetentionLimit = nil
		}
		if elem.SnapshotWindow != nil {
			ko.Spec.SnapshotWindow = elem.SnapshotWindow
		} else {
			ko.Spec.SnapshotWindow = nil
		}
		if elem.TransitEncryptionEnabled != nil {
			ko.Spec.TransitEncryptionEnabled = elem.TransitEncryptionEnabled
		} else {
			ko.Spec.TransitEncryptionEnabled = nil
		}
		if elem.TransitEncryptionMode != nil {
			ko.Status.TransitEncryptionMode = elem.TransitEncryptionMode
		} else {
			ko.Status.TransitEncryptionMode = nil
		}
		found = true
		break
	}
	if !found {
		return nil, ackerr.NotFound
	}

	rm.setStatusDefaults(ko)
	// Pending modifications are reflected back into the Spec so the delta
	// comparison does not flag an in-flight change as new drift.
	if pendingModifications := ko.Status.PendingModifiedValues; pendingModifications != nil {
		if pendingModifications.NumCacheNodes != nil {
			ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes
		}
		if pendingModifications.CacheNodeType != nil {
			ko.Spec.CacheNodeType = pendingModifications.CacheNodeType
		}
		if pendingModifications.TransitEncryptionEnabled != nil {
			ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled
		}
	}
	// NOTE(review): isAvailable is given the desired resource r rather than
	// the freshly merged ko — confirm this is the intended input for the
	// availability check.
	if isAvailable(r) {
		ackcondition.SetSynced(&resource{ko}, corev1.ConditionTrue, nil, nil)
	} else {
		// Setting resource synced condition to false will trigger a requeue of
		// the resource. No need to return a requeue error here.
		ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil)
		return &resource{ko}, nil
	}
	if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil {
		resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN)
		tags, err := rm.getTags(ctx, *resourceARN)
		if err != nil {
			return nil, err
		}
		ko.Spec.Tags = tags
	}

	return &resource{ko}, nil
}

// requiredFieldsMissingFromReadManyInput returns true if there are any fields
// for the ReadMany Input shape that are required but not present in the
// resource's Spec or Status
func (rm *resourceManager) requiredFieldsMissingFromReadManyInput(
	r *resource,
) bool {
	return r.ko.Spec.CacheClusterID == nil

}

// newListRequestPayload returns SDK-specific struct for the HTTP request
// payload of the List API call for the resource
func (rm *resourceManager) newListRequestPayload(
	r *resource,
) (*svcsdk.DescribeCacheClustersInput, error) {
	res := &svcsdk.DescribeCacheClustersInput{}

	if r.ko.Spec.CacheClusterID != nil {
		res.SetCacheClusterId(*r.ko.Spec.CacheClusterID)
	}

	return res, nil
}

// sdkCreate creates the supplied resource in the backend AWS service API and
// returns a copy of the resource with resource fields (in both Spec and
// Status) filled in with values from the CREATE API operation's Output shape.
+func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.CreateCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.CreateCacheClusterWithContext(ctx, input) + rm.metrics.RecordAPICall("CREATE", "CreateCacheCluster", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.CacheCluster.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.CacheCluster.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CacheCluster.AtRestEncryptionEnabled != nil { + ko.Status.AtRestEncryptionEnabled = resp.CacheCluster.AtRestEncryptionEnabled + } else { + ko.Status.AtRestEncryptionEnabled = nil + } + if resp.CacheCluster.AuthTokenEnabled != nil { + ko.Status.AuthTokenEnabled = resp.CacheCluster.AuthTokenEnabled + } else { + ko.Status.AuthTokenEnabled = nil + } + if resp.CacheCluster.AuthTokenLastModifiedDate != nil { + ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*resp.CacheCluster.AuthTokenLastModifiedDate} + } else { + ko.Status.AuthTokenLastModifiedDate = nil + } + if resp.CacheCluster.AutoMinorVersionUpgrade != nil { + ko.Spec.AutoMinorVersionUpgrade = resp.CacheCluster.AutoMinorVersionUpgrade + } else { + ko.Spec.AutoMinorVersionUpgrade = nil + } + if resp.CacheCluster.CacheClusterCreateTime != nil { + ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.CacheCluster.CacheClusterCreateTime} + } else { + ko.Status.CacheClusterCreateTime = nil + } + if resp.CacheCluster.CacheClusterId != nil { + 
ko.Spec.CacheClusterID = resp.CacheCluster.CacheClusterId + } else { + ko.Spec.CacheClusterID = nil + } + if resp.CacheCluster.CacheClusterStatus != nil { + ko.Status.CacheClusterStatus = resp.CacheCluster.CacheClusterStatus + } else { + ko.Status.CacheClusterStatus = nil + } + if resp.CacheCluster.CacheNodeType != nil { + ko.Spec.CacheNodeType = resp.CacheCluster.CacheNodeType + } else { + ko.Spec.CacheNodeType = nil + } + if resp.CacheCluster.CacheNodes != nil { + f9 := []*svcapitypes.CacheNode{} + for _, f9iter := range resp.CacheCluster.CacheNodes { + f9elem := &svcapitypes.CacheNode{} + if f9iter.CacheNodeCreateTime != nil { + f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime} + } + if f9iter.CacheNodeId != nil { + f9elem.CacheNodeID = f9iter.CacheNodeId + } + if f9iter.CacheNodeStatus != nil { + f9elem.CacheNodeStatus = f9iter.CacheNodeStatus + } + if f9iter.CustomerAvailabilityZone != nil { + f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone + } + if f9iter.CustomerOutpostArn != nil { + f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn + } + if f9iter.Endpoint != nil { + f9elemf5 := &svcapitypes.Endpoint{} + if f9iter.Endpoint.Address != nil { + f9elemf5.Address = f9iter.Endpoint.Address + } + if f9iter.Endpoint.Port != nil { + f9elemf5.Port = f9iter.Endpoint.Port + } + f9elem.Endpoint = f9elemf5 + } + if f9iter.ParameterGroupStatus != nil { + f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus + } + if f9iter.SourceCacheNodeId != nil { + f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId + } + f9 = append(f9, f9elem) + } + ko.Status.CacheNodes = f9 + } else { + ko.Status.CacheNodes = nil + } + if resp.CacheCluster.CacheParameterGroup != nil { + f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} + if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { + f10f0 := []*string{} + for _, f10f0iter := range resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot { + var f10f0elem string + f10f0elem 
= *f10f0iter + f10f0 = append(f10f0, &f10f0elem) + } + f10.CacheNodeIDsToReboot = f10f0 + } + if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName + } + if resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus != nil { + f10.ParameterApplyStatus = resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus + } + ko.Status.CacheParameterGroup = f10 + } else { + ko.Status.CacheParameterGroup = nil + } + if resp.CacheCluster.CacheSecurityGroups != nil { + f11 := []*svcapitypes.CacheSecurityGroupMembership{} + for _, f11iter := range resp.CacheCluster.CacheSecurityGroups { + f11elem := &svcapitypes.CacheSecurityGroupMembership{} + if f11iter.CacheSecurityGroupName != nil { + f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName + } + if f11iter.Status != nil { + f11elem.Status = f11iter.Status + } + f11 = append(f11, f11elem) + } + ko.Status.CacheSecurityGroups = f11 + } else { + ko.Status.CacheSecurityGroups = nil + } + if resp.CacheCluster.CacheSubnetGroupName != nil { + ko.Spec.CacheSubnetGroupName = resp.CacheCluster.CacheSubnetGroupName + } else { + ko.Spec.CacheSubnetGroupName = nil + } + if resp.CacheCluster.ClientDownloadLandingPage != nil { + ko.Status.ClientDownloadLandingPage = resp.CacheCluster.ClientDownloadLandingPage + } else { + ko.Status.ClientDownloadLandingPage = nil + } + if resp.CacheCluster.ConfigurationEndpoint != nil { + f14 := &svcapitypes.Endpoint{} + if resp.CacheCluster.ConfigurationEndpoint.Address != nil { + f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address + } + if resp.CacheCluster.ConfigurationEndpoint.Port != nil { + f14.Port = resp.CacheCluster.ConfigurationEndpoint.Port + } + ko.Status.ConfigurationEndpoint = f14 + } else { + ko.Status.ConfigurationEndpoint = nil + } + if resp.CacheCluster.Engine != nil { + ko.Spec.Engine = resp.CacheCluster.Engine + } else { + ko.Spec.Engine = nil + } + if 
resp.CacheCluster.EngineVersion != nil { + ko.Spec.EngineVersion = resp.CacheCluster.EngineVersion + } else { + ko.Spec.EngineVersion = nil + } + if resp.CacheCluster.IpDiscovery != nil { + ko.Spec.IPDiscovery = resp.CacheCluster.IpDiscovery + } else { + ko.Spec.IPDiscovery = nil + } + if resp.CacheCluster.NetworkType != nil { + ko.Spec.NetworkType = resp.CacheCluster.NetworkType + } else { + ko.Spec.NetworkType = nil + } + if resp.CacheCluster.NotificationConfiguration != nil { + f19 := &svcapitypes.NotificationConfiguration{} + if resp.CacheCluster.NotificationConfiguration.TopicArn != nil { + f19.TopicARN = resp.CacheCluster.NotificationConfiguration.TopicArn + } + if resp.CacheCluster.NotificationConfiguration.TopicStatus != nil { + f19.TopicStatus = resp.CacheCluster.NotificationConfiguration.TopicStatus + } + ko.Status.NotificationConfiguration = f19 + } else { + ko.Status.NotificationConfiguration = nil + } + if resp.CacheCluster.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = resp.CacheCluster.NumCacheNodes + } else { + ko.Spec.NumCacheNodes = nil + } + if resp.CacheCluster.PendingModifiedValues != nil { + f21 := &svcapitypes.PendingModifiedValues{} + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != nil { + f21.AuthTokenStatus = resp.CacheCluster.PendingModifiedValues.AuthTokenStatus + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { + f21f1 := []*string{} + for _, f21f1iter := range resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove { + var f21f1elem string + f21f1elem = *f21f1iter + f21f1 = append(f21f1, &f21f1elem) + } + f21.CacheNodeIDsToRemove = f21f1 + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { + f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType + } + if resp.CacheCluster.PendingModifiedValues.EngineVersion != nil { + f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion + } + if 
resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { + f21.NumCacheNodes = resp.CacheCluster.PendingModifiedValues.NumCacheNodes + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { + f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != nil { + f21.TransitEncryptionMode = resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode + } + ko.Status.PendingModifiedValues = f21 + } else { + ko.Status.PendingModifiedValues = nil + } + if resp.CacheCluster.PreferredAvailabilityZone != nil { + ko.Spec.PreferredAvailabilityZone = resp.CacheCluster.PreferredAvailabilityZone + } else { + ko.Spec.PreferredAvailabilityZone = nil + } + if resp.CacheCluster.PreferredMaintenanceWindow != nil { + ko.Spec.PreferredMaintenanceWindow = resp.CacheCluster.PreferredMaintenanceWindow + } else { + ko.Spec.PreferredMaintenanceWindow = nil + } + if resp.CacheCluster.PreferredOutpostArn != nil { + ko.Spec.PreferredOutpostARN = resp.CacheCluster.PreferredOutpostArn + } else { + ko.Spec.PreferredOutpostARN = nil + } + if resp.CacheCluster.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = resp.CacheCluster.ReplicationGroupId + } else { + ko.Spec.ReplicationGroupID = nil + } + if resp.CacheCluster.ReplicationGroupLogDeliveryEnabled != nil { + ko.Status.ReplicationGroupLogDeliveryEnabled = resp.CacheCluster.ReplicationGroupLogDeliveryEnabled + } else { + ko.Status.ReplicationGroupLogDeliveryEnabled = nil + } + if resp.CacheCluster.SecurityGroups != nil { + f27 := []*svcapitypes.SecurityGroupMembership{} + for _, f27iter := range resp.CacheCluster.SecurityGroups { + f27elem := &svcapitypes.SecurityGroupMembership{} + if f27iter.SecurityGroupId != nil { + f27elem.SecurityGroupID = f27iter.SecurityGroupId + } + if f27iter.Status != nil { + f27elem.Status = f27iter.Status + } + f27 = append(f27, f27elem) + } + ko.Status.SecurityGroups 
= f27 + } else { + ko.Status.SecurityGroups = nil + } + if resp.CacheCluster.SnapshotRetentionLimit != nil { + ko.Spec.SnapshotRetentionLimit = resp.CacheCluster.SnapshotRetentionLimit + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if resp.CacheCluster.SnapshotWindow != nil { + ko.Spec.SnapshotWindow = resp.CacheCluster.SnapshotWindow + } else { + ko.Spec.SnapshotWindow = nil + } + if resp.CacheCluster.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = resp.CacheCluster.TransitEncryptionEnabled + } else { + ko.Spec.TransitEncryptionEnabled = nil + } + if resp.CacheCluster.TransitEncryptionMode != nil { + ko.Status.TransitEncryptionMode = resp.CacheCluster.TransitEncryptionMode + } else { + ko.Status.TransitEncryptionMode = nil + } + + rm.setStatusDefaults(ko) + // custom set output from response + ko, err = rm.customCreateCacheClusterSetOutput(ctx, desired, resp, ko) + if err != nil { + return nil, err + } + if isCreating(&resource{ko}) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. 
+ ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } + + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.CreateCacheClusterInput, error) { + res := &svcsdk.CreateCacheClusterInput{} + + if r.ko.Spec.AZMode != nil { + res.SetAZMode(*r.ko.Spec.AZMode) + } + if r.ko.Spec.AuthToken != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) + if err != nil { + return nil, ackrequeue.Needed(err) + } + if tmpSecret != "" { + res.SetAuthToken(tmpSecret) + } + } + if r.ko.Spec.AutoMinorVersionUpgrade != nil { + res.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade) + } + if r.ko.Spec.CacheClusterID != nil { + res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + } + if r.ko.Spec.CacheNodeType != nil { + res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + } + if r.ko.Spec.CacheParameterGroupName != nil { + res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + } + if r.ko.Spec.CacheSecurityGroupNames != nil { + f6 := []*string{} + for _, f6iter := range r.ko.Spec.CacheSecurityGroupNames { + var f6elem string + f6elem = *f6iter + f6 = append(f6, &f6elem) + } + res.SetCacheSecurityGroupNames(f6) + } + if r.ko.Spec.CacheSubnetGroupName != nil { + res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + } + if r.ko.Spec.Engine != nil { + res.SetEngine(*r.ko.Spec.Engine) + } + if r.ko.Spec.EngineVersion != nil { + res.SetEngineVersion(*r.ko.Spec.EngineVersion) + } + if r.ko.Spec.IPDiscovery != nil { + res.SetIpDiscovery(*r.ko.Spec.IPDiscovery) + } + if r.ko.Spec.LogDeliveryConfigurations != nil { + f11 := []*svcsdk.LogDeliveryConfigurationRequest{} + for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { + f11elem := 
&svcsdk.LogDeliveryConfigurationRequest{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcsdk.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.SetLogGroup(*f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + } + f11elemf0.SetCloudWatchLogsDetails(f11elemf0f0) + } + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.SetDeliveryStream(*f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + } + f11elemf0.SetKinesisFirehoseDetails(f11elemf0f1) + } + f11elem.SetDestinationDetails(f11elemf0) + } + if f11iter.DestinationType != nil { + f11elem.SetDestinationType(*f11iter.DestinationType) + } + if f11iter.Enabled != nil { + f11elem.SetEnabled(*f11iter.Enabled) + } + if f11iter.LogFormat != nil { + f11elem.SetLogFormat(*f11iter.LogFormat) + } + if f11iter.LogType != nil { + f11elem.SetLogType(*f11iter.LogType) + } + f11 = append(f11, f11elem) + } + res.SetLogDeliveryConfigurations(f11) + } + if r.ko.Spec.NetworkType != nil { + res.SetNetworkType(*r.ko.Spec.NetworkType) + } + if r.ko.Spec.NotificationTopicARN != nil { + res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + } + if r.ko.Spec.NumCacheNodes != nil { + res.SetNumCacheNodes(*r.ko.Spec.NumCacheNodes) + } + if r.ko.Spec.OutpostMode != nil { + res.SetOutpostMode(*r.ko.Spec.OutpostMode) + } + if r.ko.Spec.Port != nil { + res.SetPort(*r.ko.Spec.Port) + } + if r.ko.Spec.PreferredAvailabilityZone != nil { + res.SetPreferredAvailabilityZone(*r.ko.Spec.PreferredAvailabilityZone) + } + if r.ko.Spec.PreferredAvailabilityZones != nil { + f18 := []*string{} + for _, f18iter := range r.ko.Spec.PreferredAvailabilityZones { + var f18elem string + 
f18elem = *f18iter + f18 = append(f18, &f18elem) + } + res.SetPreferredAvailabilityZones(f18) + } + if r.ko.Spec.PreferredMaintenanceWindow != nil { + res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + } + if r.ko.Spec.PreferredOutpostARN != nil { + res.SetPreferredOutpostArn(*r.ko.Spec.PreferredOutpostARN) + } + if r.ko.Spec.PreferredOutpostARNs != nil { + f21 := []*string{} + for _, f21iter := range r.ko.Spec.PreferredOutpostARNs { + var f21elem string + f21elem = *f21iter + f21 = append(f21, &f21elem) + } + res.SetPreferredOutpostArns(f21) + } + if r.ko.Spec.ReplicationGroupID != nil { + res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + } + if r.ko.Spec.SecurityGroupIDs != nil { + f23 := []*string{} + for _, f23iter := range r.ko.Spec.SecurityGroupIDs { + var f23elem string + f23elem = *f23iter + f23 = append(f23, &f23elem) + } + res.SetSecurityGroupIds(f23) + } + if r.ko.Spec.SnapshotARNs != nil { + f24 := []*string{} + for _, f24iter := range r.ko.Spec.SnapshotARNs { + var f24elem string + f24elem = *f24iter + f24 = append(f24, &f24elem) + } + res.SetSnapshotArns(f24) + } + if r.ko.Spec.SnapshotName != nil { + res.SetSnapshotName(*r.ko.Spec.SnapshotName) + } + if r.ko.Spec.SnapshotRetentionLimit != nil { + res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + } + if r.ko.Spec.SnapshotWindow != nil { + res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + } + if r.ko.Spec.Tags != nil { + f28 := []*svcsdk.Tag{} + for _, f28iter := range r.ko.Spec.Tags { + f28elem := &svcsdk.Tag{} + if f28iter.Key != nil { + f28elem.SetKey(*f28iter.Key) + } + if f28iter.Value != nil { + f28elem.SetValue(*f28iter.Value) + } + f28 = append(f28, f28elem) + } + res.SetTags(f28) + } + if r.ko.Spec.TransitEncryptionEnabled != nil { + res.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled) + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with 
updated fields. +func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkUpdate") + defer func() { + exit(err) + }() + if immutableFieldChanges := rm.getImmutableFieldChanges(delta); len(immutableFieldChanges) > 0 { + msg := fmt.Sprintf("Immutable Spec fields have been modified: %s", strings.Join(immutableFieldChanges, ",")) + return nil, ackerr.NewTerminalError(fmt.Errorf(msg)) + } + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } else if !delta.DifferentExcept("Spec.Tags") { + // If the only difference between the desired and latest is in the + // Spec.Tags field, we can skip the ModifyCacheCluster call. + return desired, nil + } + + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) + if err != nil { + return nil, err + } + if err := rm.updateCacheClusterPayload(input, desired, latest, delta); err != nil { + return nil, ackerr.NewTerminalError(err) + } + + var resp *svcsdk.ModifyCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.ModifyCacheClusterWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "ModifyCacheCluster", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.CacheCluster.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.CacheCluster.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CacheCluster.AtRestEncryptionEnabled != nil { + ko.Status.AtRestEncryptionEnabled = resp.CacheCluster.AtRestEncryptionEnabled + } else { + ko.Status.AtRestEncryptionEnabled = nil + } + if 
resp.CacheCluster.AuthTokenEnabled != nil { + ko.Status.AuthTokenEnabled = resp.CacheCluster.AuthTokenEnabled + } else { + ko.Status.AuthTokenEnabled = nil + } + if resp.CacheCluster.AuthTokenLastModifiedDate != nil { + ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*resp.CacheCluster.AuthTokenLastModifiedDate} + } else { + ko.Status.AuthTokenLastModifiedDate = nil + } + if resp.CacheCluster.AutoMinorVersionUpgrade != nil { + ko.Spec.AutoMinorVersionUpgrade = resp.CacheCluster.AutoMinorVersionUpgrade + } else { + ko.Spec.AutoMinorVersionUpgrade = nil + } + if resp.CacheCluster.CacheClusterCreateTime != nil { + ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.CacheCluster.CacheClusterCreateTime} + } else { + ko.Status.CacheClusterCreateTime = nil + } + if resp.CacheCluster.CacheClusterId != nil { + ko.Spec.CacheClusterID = resp.CacheCluster.CacheClusterId + } else { + ko.Spec.CacheClusterID = nil + } + if resp.CacheCluster.CacheClusterStatus != nil { + ko.Status.CacheClusterStatus = resp.CacheCluster.CacheClusterStatus + } else { + ko.Status.CacheClusterStatus = nil + } + if resp.CacheCluster.CacheNodeType != nil { + ko.Spec.CacheNodeType = resp.CacheCluster.CacheNodeType + } else { + ko.Spec.CacheNodeType = nil + } + if resp.CacheCluster.CacheNodes != nil { + f9 := []*svcapitypes.CacheNode{} + for _, f9iter := range resp.CacheCluster.CacheNodes { + f9elem := &svcapitypes.CacheNode{} + if f9iter.CacheNodeCreateTime != nil { + f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime} + } + if f9iter.CacheNodeId != nil { + f9elem.CacheNodeID = f9iter.CacheNodeId + } + if f9iter.CacheNodeStatus != nil { + f9elem.CacheNodeStatus = f9iter.CacheNodeStatus + } + if f9iter.CustomerAvailabilityZone != nil { + f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone + } + if f9iter.CustomerOutpostArn != nil { + f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn + } + if f9iter.Endpoint != nil { + f9elemf5 := &svcapitypes.Endpoint{} + if 
f9iter.Endpoint.Address != nil { + f9elemf5.Address = f9iter.Endpoint.Address + } + if f9iter.Endpoint.Port != nil { + f9elemf5.Port = f9iter.Endpoint.Port + } + f9elem.Endpoint = f9elemf5 + } + if f9iter.ParameterGroupStatus != nil { + f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus + } + if f9iter.SourceCacheNodeId != nil { + f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId + } + f9 = append(f9, f9elem) + } + ko.Status.CacheNodes = f9 + } else { + ko.Status.CacheNodes = nil + } + if resp.CacheCluster.CacheParameterGroup != nil { + f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} + if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { + f10f0 := []*string{} + for _, f10f0iter := range resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot { + var f10f0elem string + f10f0elem = *f10f0iter + f10f0 = append(f10f0, &f10f0elem) + } + f10.CacheNodeIDsToReboot = f10f0 + } + if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName + } + if resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus != nil { + f10.ParameterApplyStatus = resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus + } + ko.Status.CacheParameterGroup = f10 + } else { + ko.Status.CacheParameterGroup = nil + } + if resp.CacheCluster.CacheSecurityGroups != nil { + f11 := []*svcapitypes.CacheSecurityGroupMembership{} + for _, f11iter := range resp.CacheCluster.CacheSecurityGroups { + f11elem := &svcapitypes.CacheSecurityGroupMembership{} + if f11iter.CacheSecurityGroupName != nil { + f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName + } + if f11iter.Status != nil { + f11elem.Status = f11iter.Status + } + f11 = append(f11, f11elem) + } + ko.Status.CacheSecurityGroups = f11 + } else { + ko.Status.CacheSecurityGroups = nil + } + if resp.CacheCluster.CacheSubnetGroupName != nil { + ko.Spec.CacheSubnetGroupName = 
resp.CacheCluster.CacheSubnetGroupName + } else { + ko.Spec.CacheSubnetGroupName = nil + } + if resp.CacheCluster.ClientDownloadLandingPage != nil { + ko.Status.ClientDownloadLandingPage = resp.CacheCluster.ClientDownloadLandingPage + } else { + ko.Status.ClientDownloadLandingPage = nil + } + if resp.CacheCluster.ConfigurationEndpoint != nil { + f14 := &svcapitypes.Endpoint{} + if resp.CacheCluster.ConfigurationEndpoint.Address != nil { + f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address + } + if resp.CacheCluster.ConfigurationEndpoint.Port != nil { + f14.Port = resp.CacheCluster.ConfigurationEndpoint.Port + } + ko.Status.ConfigurationEndpoint = f14 + } else { + ko.Status.ConfigurationEndpoint = nil + } + if resp.CacheCluster.Engine != nil { + ko.Spec.Engine = resp.CacheCluster.Engine + } else { + ko.Spec.Engine = nil + } + if resp.CacheCluster.EngineVersion != nil { + ko.Spec.EngineVersion = resp.CacheCluster.EngineVersion + } else { + ko.Spec.EngineVersion = nil + } + if resp.CacheCluster.IpDiscovery != nil { + ko.Spec.IPDiscovery = resp.CacheCluster.IpDiscovery + } else { + ko.Spec.IPDiscovery = nil + } + if resp.CacheCluster.NetworkType != nil { + ko.Spec.NetworkType = resp.CacheCluster.NetworkType + } else { + ko.Spec.NetworkType = nil + } + if resp.CacheCluster.NotificationConfiguration != nil { + f19 := &svcapitypes.NotificationConfiguration{} + if resp.CacheCluster.NotificationConfiguration.TopicArn != nil { + f19.TopicARN = resp.CacheCluster.NotificationConfiguration.TopicArn + } + if resp.CacheCluster.NotificationConfiguration.TopicStatus != nil { + f19.TopicStatus = resp.CacheCluster.NotificationConfiguration.TopicStatus + } + ko.Status.NotificationConfiguration = f19 + } else { + ko.Status.NotificationConfiguration = nil + } + if resp.CacheCluster.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = resp.CacheCluster.NumCacheNodes + } else { + ko.Spec.NumCacheNodes = nil + } + if resp.CacheCluster.PendingModifiedValues != nil { + f21 := 
&svcapitypes.PendingModifiedValues{} + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != nil { + f21.AuthTokenStatus = resp.CacheCluster.PendingModifiedValues.AuthTokenStatus + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { + f21f1 := []*string{} + for _, f21f1iter := range resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove { + var f21f1elem string + f21f1elem = *f21f1iter + f21f1 = append(f21f1, &f21f1elem) + } + f21.CacheNodeIDsToRemove = f21f1 + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { + f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType + } + if resp.CacheCluster.PendingModifiedValues.EngineVersion != nil { + f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion + } + if resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { + f21.NumCacheNodes = resp.CacheCluster.PendingModifiedValues.NumCacheNodes + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { + f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != nil { + f21.TransitEncryptionMode = resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode + } + ko.Status.PendingModifiedValues = f21 + } else { + ko.Status.PendingModifiedValues = nil + } + if resp.CacheCluster.PreferredAvailabilityZone != nil { + ko.Spec.PreferredAvailabilityZone = resp.CacheCluster.PreferredAvailabilityZone + } else { + ko.Spec.PreferredAvailabilityZone = nil + } + if resp.CacheCluster.PreferredMaintenanceWindow != nil { + ko.Spec.PreferredMaintenanceWindow = resp.CacheCluster.PreferredMaintenanceWindow + } else { + ko.Spec.PreferredMaintenanceWindow = nil + } + if resp.CacheCluster.PreferredOutpostArn != nil { + ko.Spec.PreferredOutpostARN = resp.CacheCluster.PreferredOutpostArn + } else { + ko.Spec.PreferredOutpostARN = nil + } + if 
resp.CacheCluster.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = resp.CacheCluster.ReplicationGroupId + } else { + ko.Spec.ReplicationGroupID = nil + } + if resp.CacheCluster.ReplicationGroupLogDeliveryEnabled != nil { + ko.Status.ReplicationGroupLogDeliveryEnabled = resp.CacheCluster.ReplicationGroupLogDeliveryEnabled + } else { + ko.Status.ReplicationGroupLogDeliveryEnabled = nil + } + if resp.CacheCluster.SecurityGroups != nil { + f27 := []*svcapitypes.SecurityGroupMembership{} + for _, f27iter := range resp.CacheCluster.SecurityGroups { + f27elem := &svcapitypes.SecurityGroupMembership{} + if f27iter.SecurityGroupId != nil { + f27elem.SecurityGroupID = f27iter.SecurityGroupId + } + if f27iter.Status != nil { + f27elem.Status = f27iter.Status + } + f27 = append(f27, f27elem) + } + ko.Status.SecurityGroups = f27 + } else { + ko.Status.SecurityGroups = nil + } + if resp.CacheCluster.SnapshotRetentionLimit != nil { + ko.Spec.SnapshotRetentionLimit = resp.CacheCluster.SnapshotRetentionLimit + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if resp.CacheCluster.SnapshotWindow != nil { + ko.Spec.SnapshotWindow = resp.CacheCluster.SnapshotWindow + } else { + ko.Spec.SnapshotWindow = nil + } + if resp.CacheCluster.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = resp.CacheCluster.TransitEncryptionEnabled + } else { + ko.Spec.TransitEncryptionEnabled = nil + } + if resp.CacheCluster.TransitEncryptionMode != nil { + ko.Status.TransitEncryptionMode = resp.CacheCluster.TransitEncryptionMode + } else { + ko.Status.TransitEncryptionMode = nil + } + + rm.setStatusDefaults(ko) + // custom set output from response + ko, err = rm.customModifyCacheClusterSetOutput(ctx, desired, resp, ko) + if err != nil { + return nil, err + } + if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + } + if 
pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } + + return &resource{ko}, nil +} + +// newUpdateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Update API call for the resource +func (rm *resourceManager) newUpdateRequestPayload( + ctx context.Context, + r *resource, + delta *ackcompare.Delta, +) (*svcsdk.ModifyCacheClusterInput, error) { + res := &svcsdk.ModifyCacheClusterInput{} + + if r.ko.Spec.AZMode != nil { + res.SetAZMode(*r.ko.Spec.AZMode) + } + res.SetApplyImmediately(true) + if r.ko.Spec.AuthToken != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) + if err != nil { + return nil, ackrequeue.Needed(err) + } + if tmpSecret != "" { + res.SetAuthToken(tmpSecret) + } + } + if r.ko.Spec.AutoMinorVersionUpgrade != nil { + res.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade) + } + if r.ko.Spec.CacheClusterID != nil { + res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + } + if r.ko.Spec.CacheNodeType != nil { + res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + } + if r.ko.Spec.CacheParameterGroupName != nil { + res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + } + if r.ko.Spec.CacheSecurityGroupNames != nil { + f9 := []*string{} + for _, f9iter := range r.ko.Spec.CacheSecurityGroupNames { + var f9elem string + f9elem = *f9iter + f9 = append(f9, &f9elem) + } + res.SetCacheSecurityGroupNames(f9) + } + if r.ko.Spec.EngineVersion != nil { + res.SetEngineVersion(*r.ko.Spec.EngineVersion) + } + if r.ko.Spec.IPDiscovery != nil { + res.SetIpDiscovery(*r.ko.Spec.IPDiscovery) + } + if r.ko.Spec.LogDeliveryConfigurations != nil { + f12 := []*svcsdk.LogDeliveryConfigurationRequest{} + for _, f12iter := range r.ko.Spec.LogDeliveryConfigurations { + f12elem := 
&svcsdk.LogDeliveryConfigurationRequest{} + if f12iter.DestinationDetails != nil { + f12elemf0 := &svcsdk.DestinationDetails{} + if f12iter.DestinationDetails.CloudWatchLogsDetails != nil { + f12elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} + if f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f12elemf0f0.SetLogGroup(*f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + } + f12elemf0.SetCloudWatchLogsDetails(f12elemf0f0) + } + if f12iter.DestinationDetails.KinesisFirehoseDetails != nil { + f12elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} + if f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f12elemf0f1.SetDeliveryStream(*f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + } + f12elemf0.SetKinesisFirehoseDetails(f12elemf0f1) + } + f12elem.SetDestinationDetails(f12elemf0) + } + if f12iter.DestinationType != nil { + f12elem.SetDestinationType(*f12iter.DestinationType) + } + if f12iter.Enabled != nil { + f12elem.SetEnabled(*f12iter.Enabled) + } + if f12iter.LogFormat != nil { + f12elem.SetLogFormat(*f12iter.LogFormat) + } + if f12iter.LogType != nil { + f12elem.SetLogType(*f12iter.LogType) + } + f12 = append(f12, f12elem) + } + res.SetLogDeliveryConfigurations(f12) + } + if r.ko.Spec.NotificationTopicARN != nil { + res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + } + if r.ko.Spec.NumCacheNodes != nil { + res.SetNumCacheNodes(*r.ko.Spec.NumCacheNodes) + } + if r.ko.Spec.PreferredMaintenanceWindow != nil { + res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + } + if r.ko.Spec.SecurityGroupIDs != nil { + f18 := []*string{} + for _, f18iter := range r.ko.Spec.SecurityGroupIDs { + var f18elem string + f18elem = *f18iter + f18 = append(f18, &f18elem) + } + res.SetSecurityGroupIds(f18) + } + if r.ko.Spec.SnapshotRetentionLimit != nil { + res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + } + if r.ko.Spec.SnapshotWindow != nil { + 
res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + } + + return res, nil +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + if isDeleting(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgCurrentlyDeleting, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. + return r, requeueWaitWhileDeleting + } + if isModifying(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgNoDeleteWhileModifying, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. 
+ return r, requeueWaitWhileModifying + } + + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeleteCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.DeleteCacheClusterWithContext(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeleteCacheCluster", err) + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeleteCacheClusterInput, error) { + res := &svcsdk.DeleteCacheClusterInput{} + + if r.ko.Spec.CacheClusterID != nil { + res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource +func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.CacheCluster, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == 
ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue + terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + 
	return nil, false // not updated
}

// terminalAWSError returns true if the supplied error is an AWS service
// error whose code is one of the 'terminal' exception codes specified in
// the generator configuration. A terminal error means reconciliation can
// never succeed without a change to the resource spec, so the reconciler
// stops retrying and surfaces a Terminal condition instead.
func (rm *resourceManager) terminalAWSError(err error) bool {
	if err == nil {
		return false
	}
	// Only AWS service errors carry a classifiable error code; anything
	// else (e.g. transport failures) is treated as retryable.
	awsErr, ok := ackerr.AWSError(err)
	if !ok {
		return false
	}
	switch awsErr.Code() {
	case "ReplicationGroupNotFoundFault",
		"InvalidReplicationGroupStateFault",
		"CacheClusterAlreadyExistsFault",
		"InsufficientCacheClusterCapacityFault",
		"CacheSecurityGroupNotFoundFault",
		"CacheSubnetGroupNotFoundFault",
		"ClusterQuotaForCustomerExceededFault",
		"NodeQuotaForClusterExceededFault",
		"NodeQuotaForCustomerExceededFault",
		"CacheParameterGroupNotFoundFault",
		"InvalidVPCNetworkStateFault",
		"TagQuotaPerResource",
		"InvalidParameterValue",
		"InvalidParameterCombination":
		return true
	default:
		return false
	}
}

// getImmutableFieldChanges returns the list of immutable Spec fields that
// differ between desired and latest according to the supplied delta.
// A non-empty result causes sdkUpdate to reject the modification as a
// terminal error, since these fields cannot be changed in place.
func (rm *resourceManager) getImmutableFieldChanges(
	delta *ackcompare.Delta,
) []string {
	var fields []string
	if delta.DifferentAt("Spec.CacheParameterGroupName") {
		fields = append(fields, "CacheParameterGroupName")
	}
	if delta.DifferentAt("Spec.ReplicationGroupID") {
		fields = append(fields, "ReplicationGroupID")
	}
	if delta.DifferentAt("Spec.SnapshotName") {
		fields = append(fields, "SnapshotName")
	}

	return fields
}
diff --git a/pkg/resource/cache_cluster/tags.go b/pkg/resource/cache_cluster/tags.go
new file mode 100644
index 00000000..269967a8
--- /dev/null
+++ b/pkg/resource/cache_cluster/tags.go
@@ -0,0 +1,63 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License.
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.CacheCluster{} + _ = acktags.NewTags() +) + +// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. +func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { + result := acktags.NewTags() + if tags == nil || len(tags) == 0 { + return result + } + + for _, t := range tags { + if t.Key != nil { + if t.Value == nil { + result[*t.Key] = "" + } else { + result[*t.Key] = *t.Value + } + } + } + + return result +} + +// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. 
+func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { + result := []*svcapitypes.Tag{} + for k, v := range tags { + kCopy := k + vCopy := v + tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + result = append(result, &tag) + } + return result +} diff --git a/pkg/resource/replication_group/delta_util.go b/pkg/resource/replication_group/delta_util.go index 1a1e3b8c..74cea39d 100644 --- a/pkg/resource/replication_group/delta_util.go +++ b/pkg/resource/replication_group/delta_util.go @@ -16,13 +16,12 @@ package replication_group import ( "encoding/json" "reflect" - "strconv" - "strings" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" ) // modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary @@ -34,7 +33,7 @@ func modifyDelta( if delta.DifferentAt("Spec.EngineVersion") { if desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil { - if engineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { + if util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { common.RemoveFromDelta(delta, "Spec.EngineVersion") } } @@ -69,32 +68,6 @@ func modifyDelta( } } -// returns true if desired and latest engine versions match and false otherwise -// precondition: both desiredEV and latestEV are non-nil -// this handles the case where only the major EV is specified, e.g. "6.x" (or similar), but the latest -// -// version shows the minor version, e.g. 
"6.0.5" -func engineVersionsMatch( - desiredEV string, - latestEV string, -) bool { - if desiredEV == latestEV { - return true - } - - dMaj, dMin, _ := versionNumbersFromString(desiredEV) - lMaj, lMin, _ := versionNumbersFromString(latestEV) - last := len(desiredEV) - 1 - - // if the last character of desiredEV is "x" or the major version is higher than 5, ignore patch version when comparing. - // See https://github.com/aws-controllers-k8s/community/issues/1737 - if dMaj > 5 || desiredEV[last:] == "x" { - return dMaj == lMaj && (dMin < 0 || dMin == lMin) - } - - return false -} - // logDeliveryRequiresUpdate retrieves the last requested configurations saved in annotations and compares them // to the current desired configurations func logDeliveryRequiresUpdate(desired *resource) bool { @@ -188,28 +161,3 @@ func primaryClusterIDRequiresUpdate(desired *resource, latest *resource) (bool, return false, nil } - -// versionNumbersFromString takes a version string like "6.2", "6.x" or "7.0.4" and -// returns the major, minor and patch numbers. If no minor or patch numbers are present -// or contain the "x" placeholder, -1 is returned for that version number. -func versionNumbersFromString(version string) (int, int, int) { - parts := strings.Split(version, ".") - major := -1 - minor := -1 - patch := -1 - if len(parts) == 0 { - return major, minor, patch - } - major, _ = strconv.Atoi(parts[0]) - if len(parts) > 1 { - if !strings.EqualFold(parts[1], "x") { - minor, _ = strconv.Atoi(parts[1]) - } - } - if len(parts) > 2 { - if !strings.EqualFold(parts[2], "x") { - patch, _ = strconv.Atoi(parts[2]) - } - } - return major, minor, patch -} diff --git a/pkg/resource/replication_group/delta_util_test.go b/pkg/resource/replication_group/delta_util_test.go deleted file mode 100644 index a3cbfb4e..00000000 --- a/pkg/resource/replication_group/delta_util_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import "testing" -import "github.com/stretchr/testify/require" - -func TestEngineVersionsMatch(t *testing.T) { - require := require.New(t) - - require.False(engineVersionsMatch("6.3", "6.2.6")) - require.True(engineVersionsMatch("6.2", "6.2.6")) - require.True(engineVersionsMatch("6.x", "6.0.5")) - require.False(engineVersionsMatch("13.x", "6.0.6")) - require.True(engineVersionsMatch("5.0.3", "5.0.3")) - require.False(engineVersionsMatch("5.0.3", "5.0.4")) -} diff --git a/pkg/resource/replication_group/hooks.go b/pkg/resource/replication_group/hooks.go index 8a292eab..0b2e1271 100644 --- a/pkg/resource/replication_group/hooks.go +++ b/pkg/resource/replication_group/hooks.go @@ -17,12 +17,10 @@ import ( "context" "errors" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" - ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" ) var ( @@ -48,10 +46,6 @@ var ( errors.New("Modify is in progress."), ackrequeue.DefaultRequeueAfterDuration, ) - requeueWaitWhileTagUpdated = ackrequeue.NeededAfter( - errors.New("tags Update is in progress"), - ackrequeue.DefaultRequeueAfterDuration, - ) ) // isDeleting returns true if supplied replication group 
resource state is 'deleting' @@ -91,119 +85,19 @@ func isCreateFailed(r *resource) bool { return status == statusCreateFailed } -// getTags retrieves the resource's associated tags +// getTags retrieves the resource's associated tags. func (rm *resourceManager) getTags( ctx context.Context, resourceARN string, ) ([]*svcapitypes.Tag, error) { - resp, err := rm.sdkapi.ListTagsForResourceWithContext( - ctx, - &svcsdk.ListTagsForResourceInput{ - ResourceName: &resourceARN, - }, - ) - rm.metrics.RecordAPICall("GET", "ListTagsForResource", err) - if err != nil { - return nil, err - } - tags := make([]*svcapitypes.Tag, 0, len(resp.TagList)) - for _, tag := range resp.TagList { - tags = append(tags, &svcapitypes.Tag{ - Key: tag.Key, - Value: tag.Value, - }) - } - return tags, nil + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) } -// syncTags keeps the resource's tags in sync -// -// NOTE(jaypipes): Elasticache's Tagging APIs differ from other AWS APIs in the -// following ways: -// -// 1. The names of the tagging API operations are different. Other APIs use the -// Tagris `ListTagsForResource`, `TagResource` and `UntagResource` API -// calls. RDS uses `ListTagsForResource`, `AddTagsToResource` and -// `RemoveTagsFromResource`. -// -// 2. Even though the name of the `ListTagsForResource` API call is the same, -// the structure of the input and the output are different from other APIs. -// For the input, instead of a `ResourceArn` field, Elasticache names the field -// `ResourceName`, but actually expects an ARN, not the replication group -// name. This is the same for the `AddTagsToResource` and -// `RemoveTagsFromResource` input shapes. For the output shape, the field is -// called `TagList` instead of `Tags` but is otherwise the same struct with -// a `Key` and `Value` member field. +// syncTags keeps the resource's tags in sync. 
func (rm *resourceManager) syncTags( ctx context.Context, desired *resource, latest *resource, ) (err error) { - rlog := ackrtlog.FromContext(ctx) - exit := rlog.Trace("rm.syncTags") - defer func() { exit(err) }() - - arn := (*string)(latest.ko.Status.ACKResourceMetadata.ARN) - - from := ToACKTags(latest.ko.Spec.Tags) - to := ToACKTags(desired.ko.Spec.Tags) - - added, _, removed := ackcompare.GetTagsDifference(from, to) - - // NOTE(jaypipes): According to the elasticache API documentation, adding a tag - // with a new value overwrites any existing tag with the same key. So, we - // don't need to do anything to "update" a Tag. Simply including it in the - // AddTagsToResource call is enough. - for key := range removed { - if _, ok := added[key]; ok { - delete(removed, key) - } - } - - // Modify tags causing the replication group to be modified and become unavailable temporarily - // so after adding or removing tags, we have to wait for the replication group to be available again - // process: add tags -> requeue -> remove tags -> requeue -> other update - if len(added) > 0 { - toAdd := make([]*svcsdk.Tag, 0, len(added)) - for key, val := range added { - key, val := key, val - toAdd = append(toAdd, &svcsdk.Tag{ - Key: &key, - Value: &val, - }) - } - - rlog.Debug("adding tags to replication group", "tags", added) - _, err = rm.sdkapi.AddTagsToResourceWithContext( - ctx, - &svcsdk.AddTagsToResourceInput{ - ResourceName: arn, - Tags: toAdd, - }, - ) - rm.metrics.RecordAPICall("UPDATE", "AddTagsToResource", err) - if err != nil { - return err - } - } else if len(removed) > 0 { - toRemove := make([]*string, 0, len(removed)) - for key := range removed { - key := key - toRemove = append(toRemove, &key) - } - rlog.Debug("removing tags from replication group", "tags", removed) - _, err = rm.sdkapi.RemoveTagsFromResourceWithContext( - ctx, - &svcsdk.RemoveTagsFromResourceInput{ - ResourceName: arn, - TagKeys: toRemove, - }, - ) - rm.metrics.RecordAPICall("UPDATE", 
"RemoveTagsFromResource", err) - if err != nil { - return err - } - } - - return requeueWaitWhileTagUpdated + return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, ToACKTags, rm.sdkapi, rm.metrics) } diff --git a/pkg/util/engine_version.go b/pkg/util/engine_version.go new file mode 100644 index 00000000..e83b6034 --- /dev/null +++ b/pkg/util/engine_version.go @@ -0,0 +1,60 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package util + +import ( + "strconv" + "strings" +) + +// EngineVersionsMatch returns true if desired and latest engine versions match and false otherwise +// precondition: both desiredEV and latestEV are non-nil +// this handles the case where only the major EV is specified, e.g. "6.x" (or similar), +// but the latest version shows the minor version, e.g. "6.0.5". +func EngineVersionsMatch(desiredEV, latestEV string) bool { + if desiredEV == latestEV { + return true + } + + dMaj, dMin := versionNumbersFromString(desiredEV) + lMaj, lMin := versionNumbersFromString(latestEV) + last := len(desiredEV) - 1 + + // if the last character of desiredEV is "x" or the major version is higher than 5, ignore patch version when comparing. 
+ // See https://github.com/aws-controllers-k8s/community/issues/1737 + if dMaj > 5 || desiredEV[last:] == "x" { + return dMaj == lMaj && (dMin < 0 || dMin == lMin) + } + + return false +} + +// versionNumbersFromString takes a version string like "6.2", "6.x" or "7.0.4" and +// returns the major, minor and patch numbers. If no minor or patch numbers are present +// or contain the "x" placeholder, -1 is returned for that version number. +func versionNumbersFromString(version string) (int, int) { + parts := strings.Split(version, ".") + major := -1 + minor := -1 + if len(parts) == 0 { + return major, minor + } + major, _ = strconv.Atoi(parts[0]) + if len(parts) > 1 { + if !strings.EqualFold(parts[1], "x") { + minor, _ = strconv.Atoi(parts[1]) + } + } + return major, minor +} diff --git a/pkg/util/engine_version_test.go b/pkg/util/engine_version_test.go new file mode 100644 index 00000000..8da8d33f --- /dev/null +++ b/pkg/util/engine_version_test.go @@ -0,0 +1,69 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package util_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" +) + +func TestEngineVersionsMatch(t *testing.T) { + tests := []struct { + desiredVersion string + latestVersion string + expected bool + }{ + { + desiredVersion: "6.3", + latestVersion: "6.2.6", + expected: false, + }, + { + desiredVersion: "6.2", + latestVersion: "6.2.6", + expected: true, + }, + { + desiredVersion: "6.x", + latestVersion: "6.0.5", + expected: true, + }, + { + desiredVersion: "13.x", + latestVersion: "6.0.6", + expected: false, + }, + { + desiredVersion: "5.0.3", + latestVersion: "5.0.3", + expected: true, + }, + { + desiredVersion: "5.0.3", + latestVersion: "5.0.4", + expected: false, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) { + require := require.New(t) + require.Equal(util.EngineVersionsMatch(tt.desiredVersion, tt.latestVersion), tt.expected) + }) + } +} diff --git a/pkg/util/tags.go b/pkg/util/tags.go new file mode 100644 index 00000000..9685019b --- /dev/null +++ b/pkg/util/tags.go @@ -0,0 +1,158 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package util + +import ( + "context" + "errors" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var requeueWaitWhileTagUpdated = ackrequeue.NeededAfter( + errors.New("tags Update is in progress"), + ackrequeue.DefaultRequeueAfterDuration, +) + +// GetTags retrieves the resource's associated tags. +func GetTags( + ctx context.Context, + sdkapi elasticacheiface.ElastiCacheAPI, + metrics *metrics.Metrics, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + resp, err := sdkapi.ListTagsForResourceWithContext( + ctx, + &svcsdk.ListTagsForResourceInput{ + ResourceName: &resourceARN, + }, + ) + metrics.RecordAPICall("GET", "ListTagsForResource", err) + if err != nil { + return nil, err + } + tags := make([]*svcapitypes.Tag, 0, len(resp.TagList)) + for _, tag := range resp.TagList { + tags = append(tags, &svcapitypes.Tag{ + Key: tag.Key, + Value: tag.Value, + }) + } + return tags, nil +} + +// SyncTags keeps the resource's tags in sync +// +// NOTE(jaypipes): Elasticache's Tagging APIs differ from other AWS APIs in the +// following ways: +// +// 1. The names of the tagging API operations are different. Other APIs use the +// Tagris `ListTagsForResource`, `TagResource` and `UntagResource` API +// calls. RDS uses `ListTagsForResource`, `AddTagsToResource` and +// `RemoveTagsFromResource`. +// +// 2. 
Even though the name of the `ListTagsForResource` API call is the same, +// the structure of the input and the output are different from other APIs. +// For the input, instead of a `ResourceArn` field, Elasticache names the field +// `ResourceName`, but actually expects an ARN, not the cache cluster +// name. This is the same for the `AddTagsToResource` and +// `RemoveTagsFromResource` input shapes. For the output shape, the field is +// called `TagList` instead of `Tags` but is otherwise the same struct with +// a `Key` and `Value` member field. +func SyncTags( + ctx context.Context, + desiredTags []*svcapitypes.Tag, + latestTags []*svcapitypes.Tag, + latestACKResourceMetadata *ackv1alpha1.ResourceMetadata, + toACKTags func(tags []*svcapitypes.Tag) acktags.Tags, + sdkapi elasticacheiface.ElastiCacheAPI, + metrics *metrics.Metrics, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncTags") + defer func() { exit(err) }() + + arn := (*string)(latestACKResourceMetadata.ARN) + + from := toACKTags(latestTags) + to := toACKTags(desiredTags) + + added, _, removed := ackcompare.GetTagsDifference(from, to) + + // NOTE(jaypipes): According to the elasticache API documentation, adding a tag + // with a new value overwrites any existing tag with the same key. So, we + // don't need to do anything to "update" a Tag. Simply including it in the + // AddTagsToResource call is enough. 
+ for key := range removed { + if _, ok := added[key]; ok { + delete(removed, key) + } + } + + // Modify tags causing the cache cluster to be modified and become unavailable temporarily + // so after adding or removing tags, we have to wait for the cache cluster to be available again + // process: add tags -> requeue -> remove tags -> requeue -> other update + if len(added) > 0 { + toAdd := make([]*svcsdk.Tag, 0, len(added)) + for key, val := range added { + key, val := key, val + toAdd = append(toAdd, &svcsdk.Tag{ + Key: &key, + Value: &val, + }) + } + + rlog.Debug("adding tags to cache cluster", "tags", added) + _, err = sdkapi.AddTagsToResourceWithContext( + ctx, + &svcsdk.AddTagsToResourceInput{ + ResourceName: arn, + Tags: toAdd, + }, + ) + metrics.RecordAPICall("UPDATE", "AddTagsToResource", err) + if err != nil { + return err + } + } else if len(removed) > 0 { + toRemove := make([]*string, 0, len(removed)) + for key := range removed { + key := key + toRemove = append(toRemove, &key) + } + rlog.Debug("removing tags from cache cluster", "tags", removed) + _, err = sdkapi.RemoveTagsFromResourceWithContext( + ctx, + &svcsdk.RemoveTagsFromResourceInput{ + ResourceName: arn, + TagKeys: toRemove, + }, + ) + metrics.RecordAPICall("UPDATE", "RemoveTagsFromResource", err) + if err != nil { + return err + } + } + + return requeueWaitWhileTagUpdated +} diff --git a/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl new file mode 100644 index 00000000..967aaeb8 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl @@ -0,0 +1,6 @@ + if isCreating(&resource{ko}) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. 
+ ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } diff --git a/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl new file mode 100644 index 00000000..11633009 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl @@ -0,0 +1,32 @@ + if isDeleting(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgCurrentlyDeleting, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. + return r, requeueWaitWhileDeleting + } + if isModifying(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgNoDeleteWhileModifying, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. 
+ return r, requeueWaitWhileModifying + } diff --git a/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl new file mode 100644 index 00000000..cf067441 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl @@ -0,0 +1,27 @@ + if pendingModifications := ko.Status.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } + if isAvailable(r) { + ackcondition.SetSynced(&resource{ko}, corev1.ConditionTrue, nil, nil) + } else { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. 
+ ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } diff --git a/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl new file mode 100644 index 00000000..c571ff4b --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl @@ -0,0 +1,3 @@ + if err := rm.updateCacheClusterPayload(input, desired, latest, delta); err != nil { + return nil, ackerr.NewTerminalError(err) + } diff --git a/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl new file mode 100644 index 00000000..602943c0 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl @@ -0,0 +1,11 @@ + if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } diff --git a/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl new file mode 100644 index 00000000..a1f55996 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl @@ -0,0 +1,9 @@ + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } else if 
!delta.DifferentExcept("Spec.Tags") { + // If the only difference between the desired and latest is in the + // Spec.Tags field, we can skip the ModifyCacheCluster call. + return desired, nil + } diff --git a/test/e2e/resources/cache_cluster_simple.yaml b/test/e2e/resources/cache_cluster_simple.yaml new file mode 100644 index 00000000..8c437627 --- /dev/null +++ b/test/e2e/resources/cache_cluster_simple.yaml @@ -0,0 +1,16 @@ +# A simple CacheCluster manifest. +apiVersion: elasticache.services.k8s.aws/v1alpha1 +kind: CacheCluster +metadata: + name: $CACHE_CLUSTER_ID +spec: + cacheClusterID: $CACHE_CLUSTER_ID + cacheNodeType: cache.t3.micro + numCacheNodes: 2 + engine: memcached + autoMinorVersionUpgrade: false + tags: + - key: t1 + value: v1 + - key: t2 + value: v2 diff --git a/test/e2e/service_cleanup.py b/test/e2e/service_cleanup.py index b3e16543..c0ea496c 100644 --- a/test/e2e/service_cleanup.py +++ b/test/e2e/service_cleanup.py @@ -146,6 +146,6 @@ def service_cleanup(config: dict): logging.exception(f"Unable to delete Elasticache cache parameter group {resources.CPGName}") -if __name__ == "__main__": +if __name__ == "__main__": bootstrap_config = read_bootstrap_config(bootstrap_directory) - service_cleanup(bootstrap_config) \ No newline at end of file + service_cleanup(bootstrap_config) diff --git a/test/e2e/tests/test_cache_cluster.py b/test/e2e/tests/test_cache_cluster.py new file mode 100644 index 00000000..a05aabbb --- /dev/null +++ b/test/e2e/tests/test_cache_cluster.py @@ -0,0 +1,206 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +"""Integration tests for the ElastiCache CacheCluster resource +""" + +import boto3 +import logging +from time import sleep + +import pytest + +from acktest.resources import random_suffix_name +from acktest.k8s import resource as k8s +from acktest.k8s import condition +from acktest import tags as tagutil +from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_elasticache_resource +from e2e.replacement_values import REPLACEMENT_VALUES + +RESOURCE_PLURAL = "cacheclusters" + +# Time to wait after modifying the CR for the status to change +MODIFY_WAIT_AFTER_SECONDS = 120 + +# Time to wait after the cluster has changed status, for the CR to update +CHECK_STATUS_WAIT_SECONDS = 120 + +TAGS_PATCH_WAIT_TIME = 120 + + +def wait_for_cache_cluster_available(elasticache_client, cache_cluster_id): + waiter = elasticache_client.get_waiter( + 'cache_cluster_available', + ) + waiter.config.delay = 5 + waiter.config.max_attempts = 240 + waiter.wait(CacheClusterId=cache_cluster_id) + + +def wait_until_deleted(elasticache_client, cache_cluster_id): + waiter = elasticache_client.get_waiter( + 'cache_cluster_deleted', + ) + waiter.config.delay = 5 + waiter.config.max_attempts = 240 + waiter.wait(CacheClusterId=cache_cluster_id) + + +def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str, expected_synced: bool): + cr = k8s.get_resource(ref) + assert cr is not None + assert 'status' in cr + + assert cr['status']['cacheClusterStatus'] == expected_status + + if expected_synced: + condition.assert_synced(ref) + else: + condition.assert_not_synced(ref) + + +@pytest.fixture(scope="module") +def elasticache_client(): + return boto3.client('elasticache') + + +@pytest.fixture +def simple_cache_cluster(elasticache_client): + cache_cluster_id = random_suffix_name("simple-cache-cluster", 32) + + replacements = REPLACEMENT_VALUES.copy() + 
replacements["CACHE_CLUSTER_ID"] = cache_cluster_id + + resource_data = load_elasticache_resource( + "cache_cluster_simple", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + cache_cluster_id, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref, wait_periods=15, period_length=20) + + logging.info("resource consumed by controller") + + assert cr is not None + assert k8s.get_resource_exists(ref) + + res = k8s.get_resource(ref) + print(res) + + yield (ref, cr) + + # Try to delete, if doesn't already exist + try: + _, deleted = k8s.delete_custom_resource(ref, 3, 10) + assert deleted + wait_until_deleted(elasticache_client, cache_cluster_id) + except: + pass + + +@service_marker +@pytest.mark.canary +class TestCacheCluster: + def test_create_update_delete_cache_cluster(self, elasticache_client, simple_cache_cluster): + (ref, cr) = simple_cache_cluster + + cache_cluster_id = cr["spec"]["cacheClusterID"] + + logging.info("starting cache cluster test") + logging.info(cache_cluster_id) + try: + aws_res = elasticache_client.describe_cache_clusters(CacheClusterId=cache_cluster_id) + assert len(aws_res["CacheClusters"]) == 1 + print(aws_res['CacheClusters']) + except elasticache_client.exceptions.CacheClusterNotFoundFault: + pytest.fail(f"Could not find cache cluster '{cache_cluster_id}' in ElastiCache") + + logging.info("waiting for cluster to become available") + wait_for_cache_cluster_available(elasticache_client, cache_cluster_id) + + updates = { + "spec": { + "numCacheNodes": 3, + "autoMinorVersionUpgrade": True + } + } + k8s.patch_custom_resource(ref, updates) + logging.info("patched resource") + print(updates) + sleep(MODIFY_WAIT_AFTER_SECONDS) + + # Ensure status is updating properly and set as not synced + get_and_assert_status(ref, 'modifying', False) + + # 
Wait for the status to become available again + wait_for_cache_cluster_available(elasticache_client, cache_cluster_id) + logging.info("update complete") + + # Ensure status is updated properly once it has become active + sleep(CHECK_STATUS_WAIT_SECONDS) + get_and_assert_status(ref, 'available', True) + + aws_res = elasticache_client.describe_cache_clusters(CacheClusterId=cache_cluster_id) + assert len(aws_res["CacheClusters"]) == 1 + cache_cluster = aws_res["CacheClusters"][0] + assert cache_cluster['NumCacheNodes'] == 3 + assert cache_cluster['AutoMinorVersionUpgrade'] + + updates = { + "spec": { + "numCacheNodes": 4, + "preferredAvailabilityZones": ["us-west-2a"] + } + } + + k8s.patch_custom_resource(ref, updates) + sleep(MODIFY_WAIT_AFTER_SECONDS) + get_and_assert_status(ref, 'modifying', False) + + wait_for_cache_cluster_available(elasticache_client, cache_cluster_id) + + aws_res = elasticache_client.describe_cache_clusters(CacheClusterId=cache_cluster_id) + assert len(aws_res['CacheClusters']) == 1 + cache_cluster = aws_res['CacheClusters'][0] + assert cache_cluster['NumCacheNodes'] == 4 + + updates = { + "spec": { + "tags": [ + { + "key": "k1", + "value": "v1" + }, + { + "key": "k2", + "value": "v2" + } + ] + } + } + + k8s.patch_custom_resource(ref, updates) + sleep(TAGS_PATCH_WAIT_TIME) + tag_list = elasticache_client.list_tags_for_resource(ResourceName=cr['status']['ackResourceMetadata']['arn']) + tags = tagutil.clean(tag_list['TagList']) + assert len(tags) == 2 + assert tags == [{"Key": "k1", "Value": "v1"}, {"Key": "k2", "Value": "v2"}] + + k8s.delete_custom_resource(ref) + wait_until_deleted(elasticache_client, cache_cluster_id)