diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 00000000..b2d22ad6 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,6 @@ +[codespell] +skip = .codespellrc,./.git,./go.local.sum,./pkg/testdata +ignore-words-list = specfield,uptodate,fpr +check-filenames = +check-hidden = +quiet = 2 diff --git a/ATTRIBUTION.md b/ATTRIBUTION.md index cbc95783..26deca1f 100644 --- a/ATTRIBUTION.md +++ b/ATTRIBUTION.md @@ -95,7 +95,7 @@ syntax/tree.go (from RegexTree.cs and RegexNode.cs): ported literally as possibl syntax/writer.go (from RegexWriter.cs): ported literally with minor changes to make it more Go-ish. -match.go (from RegexMatch.cs): ported, simplified, and changed to handle Go's lack of inheritence. +match.go (from RegexMatch.cs): ported, simplified, and changed to handle Go's lack of inheritance. regexp.go (from Regex.cs and RegexOptions.cs): conceptually serves the same "starting point", but is simplified and changed to handle differences in C# strings and Go strings/runes. diff --git a/pkg/config/resource.go b/pkg/config/resource.go index 7e96118a..bdfe9945 100644 --- a/pkg/config/resource.go +++ b/pkg/config/resource.go @@ -130,7 +130,7 @@ type SyncedConfig struct { When []SyncedCondition `json:"when"` } -// SyncedCondition represent one of the unique condition that should be fullfiled in +// SyncedCondition represents one of the unique conditions that should be fulfilled in // order to assert whether a resource is synced. type SyncedCondition struct { // Path of the field. e.g Status.Processing @@ -343,7 +343,7 @@ type AdditionalColumnConfig struct { } -// PrintConfig informs instruct the code generator on how to sort kubebuilder -// printcolumn marker coments. +// PrintConfig instructs the code generator on how to sort kubebuilder +// printcolumn marker comments. type PrintConfig struct { // AddAgeColumn a boolean informing the code generator whether to append a kubebuilder // marker comment to show a resource Age (created since date) in `kubectl get` response. 
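For context on the `SyncedCondition` hunk above: the type pairs a field path (e.g. `Status.Processing`) with the value that field must hold for a resource to count as synced. The sketch below is a hypothetical, runnable illustration of those semantics only; the real code generator consumes this config at generation time rather than interpreting it like this at runtime, and `isSynced` and the flattened `fields` map are invented names for illustration.

```go
package main

import "fmt"

// SyncedCondition mirrors the shape in pkg/config/resource.go: a field
// path and the value it must hold for the resource to be considered synced.
type SyncedCondition struct {
	Path  string // e.g. "Status.Processing"
	Value string
}

// isSynced is a hypothetical evaluator: a resource is synced only when
// every condition's path resolves to its expected value.
func isSynced(fields map[string]string, conds []SyncedCondition) bool {
	for _, c := range conds {
		if fields[c.Path] != c.Value {
			return false
		}
	}
	return true
}

func main() {
	fields := map[string]string{"Status.Processing": "false"}
	conds := []SyncedCondition{{Path: "Status.Processing", Value: "false"}}
	fmt.Println(isSynced(fields, conds)) // true: all conditions satisfied
}
```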
diff --git a/pkg/generate/code/compare.go b/pkg/generate/code/compare.go index a1c13b5d..daa395d3 100644 --- a/pkg/generate/code/compare.go +++ b/pkg/generate/code/compare.go @@ -250,7 +250,7 @@ func CompareResource( // compareNil outputs Go code that compares two field values for nullability // and, if there is a nil difference, adds the difference to a variable -// represeting the `ackcompare.Delta` +// representing the `ackcompare.Delta` // // Output code will look something like this: // diff --git a/pkg/metadata/generation_metadata.go b/pkg/metadata/generation_metadata.go index 59e3ba56..5d6dc7f1 100644 --- a/pkg/metadata/generation_metadata.go +++ b/pkg/metadata/generation_metadata.go @@ -60,7 +60,7 @@ type GenerationMetadata struct { LastModification lastModificationInfo `json:"last_modification"` - // AWS SDK Go version used generate the APIs + // AWS SDK Go version used to generate the APIs AWSSDKGoVersion string `json:"aws_sdk_go_version"` - // Informatiom about the ack-generate binary used to generate the APIs + // Information about the ack-generate binary used to generate the APIs ACKGenerateInfo ackGenerateInfo `json:"ack_generate_info"` // Information about the generator config file used to generate the APIs GeneratorConfigInfo generatorConfigInfo `json:"generator_config_info"` diff --git a/pkg/model/model_apigwv2_test.go b/pkg/model/model_apigwv2_test.go index fc09d3cf..58831b11 100644 --- a/pkg/model/model_apigwv2_test.go +++ b/pkg/model/model_apigwv2_test.go @@ -75,7 +75,7 @@ func TestAPIGatewayV2_Api(t *testing.T) { assert.NotNil(crd.SpecFields["Basepath"]) assert.NotNil(crd.SpecFields["FailOnWarnings"]) - // The required property should get overriden for Name and ProtocolType fields. + // The required property should get overridden for Name and ProtocolType fields. assert.False(crd.SpecFields["Name"].IsRequired()) assert.False(crd.SpecFields["ProtocolType"].IsRequired()) diff --git a/pkg/model/multiversion/delta.go b/pkg/model/multiversion/delta.go index 2ddf1eb8..55610eac 100644 --- a/pkg/model/multiversion/delta.go +++ b/pkg/model/multiversion/delta.go @@ -110,7 +110,7 @@ func ComputeFieldDeltas( ) ([]FieldDelta, error) { deltas := []FieldDelta{} - // collect field names and sort them to ensure a determenistic output order. + // collect field names and sort them to ensure a deterministic output order. srcNames := []string{} for name := range srcFields { srcNames = append(srcNames, name) @@ -134,7 +134,7 @@ func ComputeFieldDeltas( dstField, ok := dstFields[srcName] // If a field is found in both arrays only three changes are possible: // None, TypeChange and ChangeTypeShapeChangedToSecret. - // NOTE(a-hilaly): carefull about X -> Y then Z -> X renames. It should + // NOTE(a-hilaly): careful about X -> Y then Z -> X renames. It should // not be allowed. if ok { // mark field as visited. @@ -188,7 +188,7 @@ func ComputeFieldDeltas( dstField, ok2 := dstFields[newName] if !ok2 { // if a field was renamed and we can't find it in dstNames, something - // very wrong happend during CRD loading. - return nil, fmt.Errorf("cannot find renamed field %s " + newName) + // very wrong happened during CRD loading. + return nil, fmt.Errorf("cannot find renamed field %s", newName) } diff --git a/pkg/model/multiversion/delta_renames_test.go b/pkg/model/multiversion/delta_renames_test.go index 72127573..649e91f9 100644 --- a/pkg/model/multiversion/delta_renames_test.go +++ b/pkg/model/multiversion/delta_renames_test.go @@ -137,7 +137,7 @@ func Test_computeRenames(t *testing.T) { return } - // Since 1.12 formating functions prints maps in key-sorted order. 
+ // Since 1.12 formatting functions print maps in key-sorted order. // See https://golang.org/doc/go1.12#fmt if fmt.Sprintf("%v", got) != fmt.Sprintf("%v", tt.want) { t.Errorf("computeRenamesDelta() = %v, want %v", got, tt.want) diff --git a/pkg/model/multiversion/manager.go b/pkg/model/multiversion/manager.go index 778e455c..f4ecf0dc 100644 --- a/pkg/model/multiversion/manager.go +++ b/pkg/model/multiversion/manager.go @@ -34,7 +34,7 @@ var ( ) -// APIVersionManager is a API versions manager. It contains the mapping -// of each non-deprecated version with their correspending ackmodel.Model +// APIVersionManager is an API versions manager. It contains the mapping +// of each non-deprecated version with their corresponding ackmodel.Model // and APIInfos. type APIVersionManager struct { gitRepo *git.Repository diff --git a/pkg/model/op_test.go b/pkg/model/op_test.go index 5eca215f..42e8a56b 100644 --- a/pkg/model/op_test.go +++ b/pkg/model/op_test.go @@ -121,8 +121,8 @@ func TestGetOpTypeAndResourceNameFromOpID(t *testing.T) { }, } for _, test := range tests { - ot, resName := model.GetOpTypeAndResourceNameFromOpID(test.opID, g.GetConfig()) - assert.Equal(test.expOpType, ot, test.opID) + opType, resName := model.GetOpTypeAndResourceNameFromOpID(test.opID, g.GetConfig()) + assert.Equal(test.expOpType, opType, test.opID) assert.Equal(test.expResName, resName, test.opID) } } @@ -154,8 +154,8 @@ func TestGetOpTypeAndResourceNameFromOpID_PluralSingular(t *testing.T) { }, } for _, test := range tests { - ot, resName := model.GetOpTypeAndResourceNameFromOpID(test.opID, g.GetConfig()) - assert.Equal(test.expOpType, ot, test.opID) + opType, resName := model.GetOpTypeAndResourceNameFromOpID(test.opID, g.GetConfig()) + assert.Equal(test.expOpType, opType, test.opID) assert.Equal(test.expResName, resName, test.opID) } } diff --git a/pkg/model/sdk_api.go b/pkg/model/sdk_api.go index 74db9aa8..2d9460b0 100644 --- a/pkg/model/sdk_api.go +++ b/pkg/model/sdk_api.go @@ -105,8 +105,8 @@ func (a *SDKAPI) GetOperationMap(cfg *ackgenconfig.Config) *OperationMap { if !found { panic("operation " + opID + " in generator.yaml 'operations:' object does not exist.") } - for _, ot := range opCfg.OperationType { - opType := OpTypeFromString(ot) + for _, operationType := range opCfg.OperationType { + opType := OpTypeFromString(operationType) opMap[opType][opCfg.ResourceName] = op } } diff --git a/pkg/testdata/models/apis/codedeploy/0000-00-00/api-2.json b/pkg/testdata/models/apis/codedeploy/0000-00-00/api-2.json index cf70b6f6..b9b68c20 100644 --- a/pkg/testdata/models/apis/codedeploy/0000-00-00/api-2.json +++ b/pkg/testdata/models/apis/codedeploy/0000-00-00/api-2.json @@ -1691,7 +1691,7 @@ "ECSTaskSet":{ "type":"structure", "members":{ - "identifer":{"shape":"ECSTaskSetIdentifier"}, + "identifier":{"shape":"ECSTaskSetIdentifier"}, "desiredCount":{"shape":"ECSTaskSetCount"}, "pendingCount":{"shape":"ECSTaskSetCount"}, "runningCount":{"shape":"ECSTaskSetCount"}, diff --git a/pkg/testdata/models/apis/codedeploy/0000-00-00/docs-2.json b/pkg/testdata/models/apis/codedeploy/0000-00-00/docs-2.json index f65eb43d..d10cb9fa 100644 --- a/pkg/testdata/models/apis/codedeploy/0000-00-00/docs-2.json +++ b/pkg/testdata/models/apis/codedeploy/0000-00-00/docs-2.json @@ -45,7 +45,7 @@ "SkipWaitTimeForInstanceTermination": "
In a blue/green deployment, overrides any specified wait time and starts terminating instances immediately after the traffic routing is complete.
", "StopDeployment": "Attempts to stop an ongoing deployment.
", "TagResource": " Associates the list of tags in the input Tags parameter with the resource identified by the ResourceArn input parameter.
Disassociates a resource from a list of tags. The resource is identified by the ResourceArn input parameter. The tags are identfied by the list of keys in the TagKeys input parameter.
Disassociates a resource from a list of tags. The resource is identified by the ResourceArn input parameter. The tags are identified by the list of keys in the TagKeys input parameter.
Changes the name of an application.
", "UpdateDeploymentGroup": "Changes information about a deployment group.
" }, @@ -185,7 +185,7 @@ "refs": { "ListTagsForResourceInput$ResourceArn": " The ARN of a CodeDeploy resource. ListTagsForResource returns all the tags associated with the resource that is identified by the ResourceArn.
The ARN of a resource, such as a CodeDeploy application or deployment group.
", - "UntagResourceInput$ResourceArn": " The ARN that specifies from which resource to disassociate the tags with the keys in the TagKeys input paramter.
The ARN that specifies from which resource to disassociate the tags with the keys in the TagKeys input parameter.
Information about the deployment target.
", "refs": { "DeploymentTargetList$member": null, - "GetDeploymentTargetOutput$deploymentTarget": " A deployment target that contains information about a deployment such as its status, lifecyle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).
A deployment target that contains information about a deployment such as its status, lifecycle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).
A unique ID of an ECSTaskSet.
A unique ID of an ECSTaskSet.
Searches one or more transit gateway multicast groups and returns the group membership information.
", "SearchTransitGatewayRoutes": "Searches for routes in the specified transit gateway route table.
", "SendDiagnosticInterrupt": "Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).
In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.
Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.
For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).
", - "StartInstances": "Starts an Amazon EBS-backed instance that you've previously stopped.
Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.
Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.
Performing this operation on an instance that uses an instance store as its root device returns an error.
If you attempt to start a T3 instance with host tenancy and the unlimted CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.
For more information, see Stopping instances in the Amazon EC2 User Guide.
", + "StartInstances": "Starts an Amazon EBS-backed instance that you've previously stopped.
Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.
Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.
Performing this operation on an instance that uses an instance store as its root device returns an error.
If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.
For more information, see Stopping instances in the Amazon EC2 User Guide.
", "StartNetworkInsightsAnalysis": "Starts analyzing the specified path. If the path is reachable, the operation returns the shortest feasible path.
", "StartVpcEndpointServicePrivateDnsVerification": "Initiates the verification process to prove that the service provider owns the private DNS name domain for the endpoint service.
The service provider must successfully perform the verification before the consumer can use the name to access the service.
Before the service provider runs this command, they must add a record to the DNS server. For more information, see Adding a TXT Record to Your Domain's DNS Server in the Amazon VPC User Guide.
", "StopInstances": "Stops an Amazon EBS-backed instance.
You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.
You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.
When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.
Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.
When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting stopping your instance in the Amazon EC2 User Guide.
", @@ -1945,7 +1945,7 @@ "InstanceRequirements$RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.
This parameter is not supported for GetSpotPlacementScores.
Default: false
Indicates whether instance types must support hibernation for On-Demand Instances.
This parameter is not supported for GetSpotPlacementScores.
Default: false
Excludes the root volume from being snapshotted.
", - "InstanceTagNotificationAttribute$IncludeAllTagsOfInstance": "Indicates wheter all tag keys in the current Region are registered to appear in scheduled event notifications. true indicates that all tag keys in the current Region are registered.
Indicates whether all tag keys in the current Region are registered to appear in scheduled event notifications. true indicates that all tag keys in the current Region are registered.
Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.
Default: false
Indicates whether the EBS volume is encrypted.
", "LaunchTemplateEbsBlockDevice$DeleteOnTermination": "Indicates whether the EBS volume is deleted on instance termination.
", @@ -14811,7 +14811,7 @@ "AssociateVpcCidrBlockRequest$Ipv6CidrBlock": "An IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool in the request.
To let Amazon choose the IPv6 CIDR block for you, omit this parameter.
", "AssociateVpcCidrBlockResult$VpcId": "The ID of the VPC.
", "AssociatedRole$CertificateS3BucketName": "The name of the Amazon S3 bucket in which the Amazon S3 object is stored.
", - "AssociatedRole$CertificateS3ObjectKey": "The key of the Amazon S3 object ey where the certificate, certificate chain, and encrypted private key bundle is stored. The object key is formated as follows: role_arn/certificate_arn.
The key of the Amazon S3 object ey where the certificate, certificate chain, and encrypted private key bundle is stored. The object key is formatted as follows: role_arn/certificate_arn.
The ID of the KMS customer master key (CMK) used to encrypt the private key.
", "AssociatedTargetNetwork$NetworkId": "The ID of the subnet.
", "AssociationStatus$Message": "A message about the status of the target network association, if applicable.
", diff --git a/pkg/testdata/models/apis/ec2/0000-00-00/generator.yaml b/pkg/testdata/models/apis/ec2/0000-00-00/generator.yaml index 67a5b5da..af301a2f 100644 --- a/pkg/testdata/models/apis/ec2/0000-00-00/generator.yaml +++ b/pkg/testdata/models/apis/ec2/0000-00-00/generator.yaml @@ -75,7 +75,7 @@ operations: CreateVpcEndpoint: output_wrapper_field_path: VpcEndpoint RunInstances: - #ouput shape: Reservation + #output shape: Reservation output_wrapper_field_path: Instances operation_type: - Create diff --git a/pkg/testdata/models/apis/ecr/0000-00-01/docs.json b/pkg/testdata/models/apis/ecr/0000-00-01/docs.json index f258f1ae..3f0227b7 100644 --- a/pkg/testdata/models/apis/ecr/0000-00-01/docs.json +++ b/pkg/testdata/models/apis/ecr/0000-00-01/docs.json @@ -891,7 +891,7 @@ "refs": { "InitiateLayerUploadResponse$partSize": "The size, in bytes, that Amazon ECR expects future layer part uploads to be.
", "InvalidLayerPartException$lastValidByteReceived": "The last valid byte received from the layer part upload that is associated with the exception.
", - "UploadLayerPartRequest$partFirstByte": "The position of the first byte of the layer part witin the overall image layer.
", + "UploadLayerPartRequest$partFirstByte": "The position of the first byte of the layer part within the overall image layer.
", "UploadLayerPartRequest$partLastByte": "The position of the last byte of the layer part within the overall image layer.
", "UploadLayerPartResponse$lastByteReceived": "The integer value of the last byte received in the request.
" } diff --git a/pkg/testdata/models/apis/elasticache/0000-00-00/docs-2.json b/pkg/testdata/models/apis/elasticache/0000-00-00/docs-2.json index d4e1c9e4..fa7f1f79 100644 --- a/pkg/testdata/models/apis/elasticache/0000-00-00/docs-2.json +++ b/pkg/testdata/models/apis/elasticache/0000-00-00/docs-2.json @@ -66,7 +66,7 @@ "ResetCacheParameterGroup": "Modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters and CacheParameterGroupName parameters.
Revokes ingress from a cache security group. Use this operation to disallow access from an Amazon EC2 security group that had been previously authorized.
", "StartMigration": "Start the migration of data.
", - "TestFailover": "Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
Note the following
A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.
If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance:
Replication group message: Test Failover API called for node group <node-group-id>
Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Cache cluster message: Recovering cache nodes <node-id>
Cache cluster message: Finished recovery for cache nodes <node-id>
For more information see:
Viewing ElastiCache Events in the ElastiCache User Guide
DescribeEvents in the ElastiCache API Reference
Also see, Testing Multi-AZ in the ElastiCache User Guide.
" + "TestFailover": "Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
Note the following
A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.
If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:
Replication group message: Test Failover API called for node group <node-group-id>
Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Cache cluster message: Recovering cache nodes <node-id>
Cache cluster message: Finished recovery for cache nodes <node-id>
For more information see:
Viewing ElastiCache Events in the ElastiCache User Guide
DescribeEvents in the ElastiCache API Reference
Also see, Testing Multi-AZ in the ElastiCache User Guide.
" }, "shapes": { "APICallRateForCustomerExceededFault": { @@ -1581,7 +1581,7 @@ "PreferredAvailabilityZoneList": { "base": null, "refs": { - "ConfigureShard$PreferredAvailabilityZones": "A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache for Redis selects the availability zone for each of the replicas.
A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache for Redis selects the availability zone for each of the replicas.
A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes.
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
", "ModifyCacheClusterMessage$NewAvailabilityZones": "This option is only supported on Memcached clusters.
The list of Availability Zones where the new Memcached cache nodes are created.
This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.
Scenarios:
Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.
Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.
Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.
The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.
If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.
Impact of new add/remove requests upon pending requests
Scenario-1
Pending Action: Delete
New Request: Delete
Result: The new delete, pending or immediate, replaces the pending delete.
Scenario-2
Pending Action: Delete
New Request: Create
Result: The new create, pending or immediate, replaces the pending delete.
Scenario-3
Pending Action: Create
New Request: Delete
Result: The new delete, pending or immediate, replaces the pending create.
Scenario-4
Pending Action: Create
New Request: Create
Result: The new create is added to the pending create.
Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.
The client ID to use for OAuth authorization for the connection.
", "CreateConnectionOAuthClientRequestParameters$ClientSecret": "The client secret associated with the client ID to use for OAuth authorization for the connection.
", "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyName": "The name of the API key to use for authorization.
", - "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "The value associated with teh API key to use for authorization.
", + "UpdateConnectionApiKeyAuthRequestParameters$ApiKeyValue": "The value associated with the API key to use for authorization.
", "UpdateConnectionBasicAuthRequestParameters$Username": "The user name to use for Basic authorization.
", "UpdateConnectionBasicAuthRequestParameters$Password": "The password associated with the user name to use for Basic authorization.
", "UpdateConnectionOAuthClientRequestParameters$ClientID": "The client ID to use for OAuth authorization.
", - "UpdateConnectionOAuthClientRequestParameters$ClientSecret": "The client secret assciated with the client ID to use for OAuth authorization.
" + "UpdateConnectionOAuthClientRequestParameters$ClientSecret": "The client secret associated with the client ID to use for OAuth authorization.
" } }, "AwsVpcConfiguration": { @@ -2230,7 +2230,7 @@ "TransformerInput": { "base": null, "refs": { - "InputTransformer$InputTemplate": "Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.
If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:
The placeholder cannot be used as an object key.
The following example shows the syntax for using InputPathsMap and InputTemplate.
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": \"<instance> is in state <status>\"
}
To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": \"<instance> is in state \\\"<status>\\\"\"
}
The InputTemplate can also be valid JSON with varibles in quotes or out, as in the following example:
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": '{\"myInstance\": <instance>,\"myStatus\": \"<instance> is in state \\\"<status>\\\"\"}'
}
Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.
If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:
The placeholder cannot be used as an object key.
The following example shows the syntax for using InputPathsMap and InputTemplate.
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": \"<instance> is in state <status>\"
}
To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": \"<instance> is in state \\\"<status>\\\"\"
}
The InputTemplate can also be valid JSON with variables in quotes or out, as in the following example:
\"InputTransformer\":
{
\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},
\"InputTemplate\": '{\"myInstance\": <instance>,\"myStatus\": \"<instance> is in state \\\"<status>\\\"\"}'
}
Unique identifer for the Code signing configuration.
" + "CodeSigningConfig$CodeSigningConfigId": "Unique identifier for the Code signing configuration.
" } }, "CodeSigningConfigList": { @@ -428,7 +428,7 @@ } }, "EFSIOException": { - "base": "An error occured when reading from or writing to a connected file system.
", + "base": "An error occurred when reading from or writing to a connected file system.
", "refs": { } }, diff --git a/pkg/testdata/models/apis/memorydb/0000-00-00/docs-2.json b/pkg/testdata/models/apis/memorydb/0000-00-00/docs-2.json index 32a6ef4f..9b2c3a82 100644 --- a/pkg/testdata/models/apis/memorydb/0000-00-00/docs-2.json +++ b/pkg/testdata/models/apis/memorydb/0000-00-00/docs-2.json @@ -4,13 +4,13 @@ "operations": { "BatchUpdateCluster": "Apply the service update to a list of clusters supplied. For more information on service updates and applying them, see Applying the service updates.
", "CopySnapshot": "Makes a copy of an existing snapshot.
", - "CreateACL": "Creates an Access Control List. For more information, see Authenticating users with Access Contol Lists (ACLs).
", + "CreateACL": "Creates an Access Control List. For more information, see Authenticating users with Access Control Lists (ACLs).
", "CreateCluster": "Creates a cluster. All nodes in the cluster run the same protocol-compliant engine software.
", "CreateParameterGroup": "Creates a new MemoryDB parameter group. A parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster. For more information, see Configuring engine parameters using parameter groups.
", "CreateSnapshot": "Creates a copy of an entire cluster at a specific moment in time.
", "CreateSubnetGroup": "Creates a subnet group. A subnet group is a collection of subnets (typically private) that you can designate for your clusters running in an Amazon Virtual Private Cloud (VPC) environment. When you create a cluster in an Amazon VPC, you must specify a subnet group. MemoryDB uses that subnet group to choose a subnet and IP addresses within that subnet to associate with your nodes. For more information, see Subnets and subnet groups.
", - "CreateUser": "Creates a MemoryDB user. For more information, see Authenticating users with Access Contol Lists (ACLs).
", - "DeleteACL": "Deletes an Access Control List. The ACL must first be disassociated from the cluster before it can be deleted. For more information, see Authenticating users with Access Contol Lists (ACLs).
", + "CreateUser": "Creates a MemoryDB user. For more information, see Authenticating users with Access Control Lists (ACLs).
", + "DeleteACL": "Deletes an Access Control List. The ACL must first be disassociated from the cluster before it can be deleted. For more information, see Authenticating users with Access Control Lists (ACLs).
", "DeleteCluster": "Deletes a cluster. It also deletes all associated nodes and node endpoints
", "DeleteParameterGroup": "Deletes the specified parameter group. You cannot delete a parameter group if it is associated with any clusters. You cannot delete the default parameter groups in your account.
", "DeleteSnapshot": "Deletes an existing snapshot. When you receive a successful response from this operation, MemoryDB immediately begins deleting the snapshot; you cannot cancel or revert this operation.
", @@ -40,7 +40,7 @@ }, "shapes": { "ACL": { - "base": "An Access Control List. You can authenticate users with Access Contol Lists. ACLs enable you to control cluster access by grouping users. These Access control lists are designed as a way to organize access to clusters.
", + "base": "An Access Control List. You can authenticate users with Access Control Lists. ACLs enable you to control cluster access by grouping users. These Access control lists are designed as a way to organize access to clusters.
", "refs": { "ACLList$member": null, "CreateACLResponse$ACL": "The newly-created Access Control List.
", diff --git a/pkg/testdata/models/apis/rds/0000-00-00/docs-2.json b/pkg/testdata/models/apis/rds/0000-00-00/docs-2.json index 194933fc..68542075 100644 --- a/pkg/testdata/models/apis/rds/0000-00-00/docs-2.json +++ b/pkg/testdata/models/apis/rds/0000-00-00/docs-2.json @@ -1209,7 +1209,7 @@ } }, "DBProxyNotFoundFault": { - "base": "The specified proxy name doesn't correspond to a proxy owned by your AWS accoutn in the specified AWS Region.
", + "base": "The specified proxy name doesn't correspond to a proxy owned by your AWS account in the specified AWS Region.
", "refs": { } }, @@ -3527,7 +3527,7 @@ "CreateGlobalClusterMessage$SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.
", "CreateGlobalClusterMessage$Engine": "Provides the name of the database engine to be used for this DB cluster.
", "CreateGlobalClusterMessage$EngineVersion": "The engine version of the Aurora global database.
", - "CreateGlobalClusterMessage$DatabaseName": "The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
", + "CreateGlobalClusterMessage$DatabaseName": "The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
", "CreateOptionGroupMessage$OptionGroupName": "Specifies the name of the option group to be created.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens
First character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Example: myoptiongroup
Specifies the name of the engine that this option group should be associated with.
", "CreateOptionGroupMessage$MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.
", diff --git a/pkg/testdata/models/apis/sagemaker/0000-00-00/docs-2.json b/pkg/testdata/models/apis/sagemaker/0000-00-00/docs-2.json index 511e0803..f3cadfc7 100644 --- a/pkg/testdata/models/apis/sagemaker/0000-00-00/docs-2.json +++ b/pkg/testdata/models/apis/sagemaker/0000-00-00/docs-2.json @@ -34,7 +34,7 @@ "CreateModelPackage": "Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.
To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.
There are two types of model packages:
Versioned - a model that is part of a model group in the model registry.
Unversioned - a model package that is not part of a model group.
Creates a model group. A model group contains a group of model versions.
", "CreateModelQualityJobDefinition": "Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.
", - "CreateMonitoringSchedule": "Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endoint.
", + "CreateMonitoringSchedule": "Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.
", "CreateNotebookInstance": "Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.
In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.
Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.
After receiving the request, Amazon SageMaker does the following:
Creates a network interface in the Amazon SageMaker VPC.
(Option) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.
Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.
After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.
After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.
For more information, see How It Works.
", "CreateNotebookInstanceLifecycleConfig": "Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.
View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", "CreatePipeline": "Creates a pipeline using a JSON pipeline definition.
", @@ -84,7 +84,7 @@ "DeleteTrial": "Deletes the specified trial. All trial components that make up the trial must be deleted first. Use the DescribeTrialComponent API to get the list of trial components.
", "DeleteTrialComponent": "Deletes the specified trial component. A trial component must be disassociated from all trials before the trial component can be deleted. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.
", "DeleteUserProfile": "Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.
", - "DeleteWorkforce": "Use this operation to delete a workforce.
If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use to create a new workforce.
If a private workforce contains one or more work teams, you must use the operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will recieve a ResourceInUse error.
Use this operation to delete a workforce.
If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use to create a new workforce.
If a private workforce contains one or more work teams, you must use the operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.
Deletes an existing work team. This operation can't be undone.
", "DeregisterDevices": "Deregisters the specified devices. After you deregister a device, you will need to re-register the devices.
", "DescribeAction": "Describes an action.
", @@ -193,7 +193,7 @@ "ListUserProfiles": "Lists user profiles.
", "ListWorkforces": "Use this operation to list all private and vendor workforces in an AWS Region. Note that you can only have one private workforce per AWS Region.
", "ListWorkteams": "Gets a list of private work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.
Adds a resouce policy to control access to a model group. For information about resoure policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide..
", + "PutModelPackageGroupPolicy": "Adds a resource policy to control access to a model group. For information about resource policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide..
", "RegisterDevices": "Register devices.
", "RenderUiTemplate": "Renders the UI template so that you can preview the worker's experience.
", "Search": "Finds Amazon SageMaker resources that match a search query. Matching resources are returned as a list of SearchRecord objects in the response. You can sort the search results by any resource property in a ascending or descending order.
You can query against the following value types: numeric, text, Boolean, and timestamp.
", @@ -754,7 +754,7 @@ } }, "AutoMLCandidate": { - "base": "An Autopilot job returns recommendations, or candidates. Each candidate has futher details about the steps involed, and the status.
", + "base": "An Autopilot job returns recommendations, or candidates. Each candidate has further details about the steps involved, and the status.
", "refs": { "AutoMLCandidates$member": null, "DescribeAutoMLJobResponse$BestCandidate": "Returns the job's BestCandidate.
" @@ -4020,7 +4020,7 @@ "base": null, "refs": { "CreateFlowDefinitionResponse$FlowDefinitionArn": "The Amazon Resource Name (ARN) of the flow definition you create.
", - "DescribeFlowDefinitionResponse$FlowDefinitionArn": "The Amazon Resource Name (ARN) of the flow defintion.
", + "DescribeFlowDefinitionResponse$FlowDefinitionArn": "The Amazon Resource Name (ARN) of the flow definition.
", "FlowDefinitionSummary$FlowDefinitionArn": "The Amazon Resource Name (ARN) of the flow definition.
" } }, @@ -4190,7 +4190,7 @@ "Groups": { "base": null, "refs": { - "OidcMemberDefinition$Groups": "A list of comma seperated strings that identifies user groups in your OIDC IdP. Each user group is made up of a group of private workers.
" + "OidcMemberDefinition$Groups": "A list of comma separated strings that identifies user groups in your OIDC IdP. Each user group is made up of a group of private workers.
" } }, "HookParameters": { @@ -5948,7 +5948,7 @@ } }, "ModelArtifacts": { - "base": "Provides information about the location that is configured for storing model artifacts.
Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model defintion that desribes how to compute inferences, and other metadata.
", + "base": "Provides information about the location that is configured for storing model artifacts.
Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.
", "refs": { "DescribeCompilationJobResponse$ModelArtifacts": "Information about the location in Amazon S3 that has been configured for storing the model artifacts used in the compilation job.
", "DescribeTrainingJobResponse$ModelArtifacts": "Information about the Amazon S3 location that is configured for storing model artifacts.
", @@ -6223,7 +6223,7 @@ } }, "ModelQualityJobInput": { - "base": "The input for the model quality monitoring job. Currently endponts are supported for input for model quality monitoring jobs.
", + "base": "The input for the model quality monitoring job. Currently endpoints are supported for input for model quality monitoring jobs.
", "refs": { "CreateModelQualityJobDefinitionRequest$ModelQualityJobInput": "A list of the inputs that are monitored. Currently endpoints are supported.
", "DescribeModelQualityJobDefinitionResponse$ModelQualityJobInput": "Inputs for the model quality job.
" @@ -6578,8 +6578,8 @@ "MonitoringTimeOffsetString": { "base": null, "refs": { - "EndpointInput$StartTimeOffset": "If specified, monitoring jobs substract this time from the start time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
", - "EndpointInput$EndTimeOffset": "If specified, monitoring jobs substract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
" + "EndpointInput$StartTimeOffset": "If specified, monitoring jobs subtract this time from the start time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
", + "EndpointInput$EndTimeOffset": "If specified, monitoring jobs subtract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
" } }, "MonitoringType": { @@ -6722,7 +6722,7 @@ "ListModelQualityJobDefinitionsRequest$NextToken": "If the result of the previous ListModelQualityJobDefinitions request was truncated, the response includes a NextToken. To retrieve the next set of model quality monitoring job definitions, use the token in the next request.
If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model quality monitoring job definitions, use it in the next request.
", "ListMonitoringExecutionsRequest$NextToken": "The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.
", - "ListMonitoringExecutionsResponse$NextToken": "If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent reques
", + "ListMonitoringExecutionsResponse$NextToken": "If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request
", "ListMonitoringSchedulesRequest$NextToken": "The token returned if the response is truncated. To retrieve the next set of job executions, use it in the next request.
", "ListMonitoringSchedulesResponse$NextToken": "If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, use it in the subsequent request.
", "ListNotebookInstanceLifecycleConfigsInput$NextToken": "If the result of a ListNotebookInstanceLifecycleConfigs request was truncated, the response includes a NextToken. To get the next set of lifecycle configurations, use the token in the next request.
Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defauts to FullyReplicated
Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated
Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.
Whether the Pipe or File is used as the input mode for transfering data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.
Whether the Pipe or File is used as the input mode for transferring data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.
Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:
DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.
DisplayName – The display name to use for a topic with SMS subscriptions.
Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.
The following attribute applies only to server-side-encryption:
KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.
A map of the topic's attributes. Attributes in this map include the following:
DeliveryPolicy – The JSON serialization of the topic's delivery policy.
DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.
Owner – The AWS account ID of the topic's owner.
Policy – The JSON serialization of the topic's access control policy.
SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.
SubscriptionsDeleted – The number of deleted subscriptions for the topic.
SubscriptionsPending – The number of subscriptions pending confirmation for the topic.
TopicArn – The topic's ARN.
EffectiveDeliveryPolicy – Yhe JSON serialization of the effective delivery policy, taking system defaults into account.
The following attribute applies only to server-side-encryption:
KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.
A map of the topic's attributes. Attributes in this map include the following:
DeliveryPolicy – The JSON serialization of the topic's delivery policy.
DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.
Owner – The AWS account ID of the topic's owner.
Policy – The JSON serialization of the topic's access control policy.
SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.
SubscriptionsDeleted – The number of deleted subscriptions for the topic.
SubscriptionsPending – The number of subscriptions pending confirmation for the topic.
TopicArn – The topic's ARN.
EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy, taking system defaults into account.
The following attribute applies only to server-side-encryption:
" } }, "TopicLimitExceededException": { diff --git a/pkg/util/git.go b/pkg/util/git.go index ac34dca4..da47098d 100644 --- a/pkg/util/git.go +++ b/pkg/util/git.go @@ -25,7 +25,7 @@ import ( // LoadRepository loads a repository from the local file system. // TODO(a-hilaly): load repository into a memory filesystem (needs go1.16 -// migration or use somethign like https://github.com/spf13/afero +// migration or use something like https://github.com/spf13/afero func LoadRepository(path string) (*git.Repository, error) { return git.PlainOpen(path) } diff --git a/scripts/build-controller-release.sh b/scripts/build-controller-release.sh index 5d54bbce..6bd80054 100755 --- a/scripts/build-controller-release.sh +++ b/scripts/build-controller-release.sh @@ -232,7 +232,7 @@ controller-gen rbac:roleName="$K8S_RBAC_ROLE_NAME" paths=./... output:rbac:artif # $config_output_dir/rbac/role.yaml file. We additionally add the ability by # for the user to specify if they want the role to be ClusterRole or Role by specifying installation scope # in the helm values.yaml. We do this by having a custom helm template named _controller-role-kind-patch.yaml -# which utilizes the template langauge and adding the auto generated rules to that template. +# which utilizes the template language and adding the auto generated rules to that template. tail -n +7 "$helm_output_dir/templates/role.yaml" >> "$helm_output_dir/templates/_controller-role-kind-patch.yaml" # We have some other standard Role files for a reader and writer role, so here we rename diff --git a/scripts/build-controller.sh b/scripts/build-controller.sh index 5efb5e32..1b9e071d 100755 --- a/scripts/build-controller.sh +++ b/scripts/build-controller.sh @@ -62,7 +62,7 @@ Environment variables: AWS_SDK_GO_VERSION: Overrides the version of github.com/aws/aws-sdk-go used by 'ack-generate' to fetch the service API Specifications. Default: Version of aws/aws-sdk-go in service go.mod - TEMPLATE_DIRS: Overrides the list of directories containg ack-generate + TEMPLATE_DIRS: Overrides the list of directories containing ack-generate templates. Default: $TEMPLATE_DIRS K8S_RBAC_ROLE_NAME: Name of the Kubernetes Role to use when generating diff --git a/scripts/olm-build-bundle-image.sh b/scripts/olm-build-bundle-image.sh index 08f9e8ff..f7557b5e 100755 --- a/scripts/olm-build-bundle-image.sh +++ b/scripts/olm-build-bundle-image.sh @@ -65,7 +65,7 @@ Environment variables: BUNDLE_DOCKER_IMG_TAG: Bundle container image tag Default: \$AWS_SERVICE-bundle-\$BUNDLE_VERSION BUNDLE_DOCKER_IMG: The bundle container image (including the tag). - Supercedes the use of BUNDLE_DOCKER_IMAGE_TAG + Supersedes the use of BUNDLE_DOCKER_IMAGE_TAG and DOCKER_REPOSITORY if set. Default: $DEFAULT_DOCKER_REPOSITORY:\$AWS_SERVICE-bundle-\$BUNDLE_VERSION ADD_RH_CERTIFICATION_LABELS Adds the certification labels required by Red Hat diff --git a/scripts/olm-publish-bundle-image.sh b/scripts/olm-publish-bundle-image.sh index 2ff46c03..b4977388 100755 --- a/scripts/olm-publish-bundle-image.sh +++ b/scripts/olm-publish-bundle-image.sh @@ -32,7 +32,7 @@ Environment variables: BUNDLE_DOCKER_IMG_TAG: Bundle container image tag Default: \$AWS_SERVICE-bundle-\$BUNDLE_VERSION BUNDLE_DOCKER_IMG: The bundle container image (including the tag). - Supercedes the use of BUNDLE_DOCKER_IMAGE_TAG + Supersedes the use of BUNDLE_DOCKER_IMAGE_TAG and DOCKER_REPOSITORY if set. 
Default: $DEFAULT_DOCKER_REPOSITORY:\$AWS_SERVICE-bundle-\$BUNDLE_VERSION " diff --git a/templates/pkg/resource/resource.go.tpl b/templates/pkg/resource/resource.go.tpl index 244898ea..c75cb4d6 100644 --- a/templates/pkg/resource/resource.go.tpl +++ b/templates/pkg/resource/resource.go.tpl @@ -32,7 +32,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() }
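For context on the final `resource.go.tpl` hunk: `IsBeingDeleted` relies on the Kubernetes convention that the API server stamps a non-zero `DeletionTimestamp` on an object once deletion is requested, while finalizers may keep the object visible. A minimal, self-contained sketch of that check follows; it uses a plain `*time.Time` as a stand-in for apimachinery's `metav1.Time` (whose pointer-receiver `IsZero` also reports true for nil, which is why the template can call it unconditionally), and `isBeingDeleted` is an illustrative name, not the generated code.

```go
package main

import (
	"fmt"
	"time"
)

// isBeingDeleted mirrors the template's check: a Kubernetes object counts
// as "being deleted" once a non-zero deletion timestamp has been set on it.
func isBeingDeleted(deletionTimestamp *time.Time) bool {
	return deletionTimestamp != nil && !deletionTimestamp.IsZero()
}

func main() {
	// No deletion requested yet: the timestamp is unset.
	fmt.Println(isBeingDeleted(nil)) // false

	// The API server sets the timestamp when a delete is issued; a
	// controller sees this and can start its cleanup/finalizer logic.
	now := time.Now()
	fmt.Println(isBeingDeleted(&now)) // true
}
```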