Skip to content

Commit

Permalink
Refactor S3 connection creation
Browse files Browse the repository at this point in the history
  • Loading branch information
tremble committed Dec 21, 2022
1 parent 156d831 commit 54071a6
Show file tree
Hide file tree
Showing 7 changed files with 290 additions and 508 deletions.
81 changes: 33 additions & 48 deletions plugins/module_utils/s3.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,24 @@
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from ansible.module_utils.basic import to_text
import string
from urllib.parse import urlparse

from .botocore import boto3_conn

try:
from botocore.client import Config
from botocore.exceptions import BotoCoreError, ClientError
from hashlib import md5
HAS_MD5 = True
except ImportError:
pass # Handled by the calling module
HAS_MD5 = False

HAS_MD5 = True
try:
from hashlib import md5
import botocore
except ImportError:
try:
from md5 import md5
except ImportError:
HAS_MD5 = False
pass # Handled by the calling module


import string
from ansible.module_utils.basic import to_text

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn


def s3_head_objects(client, parts, bucket, obj, versionId):
Expand Down Expand Up @@ -66,7 +62,7 @@ def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
parts = int(etag[1:-1].split('-')[1])
try:
return calculate_checksum_with_file(s3, parts, bucket, obj, version, filename)
except (BotoCoreError, ClientError) as e:
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to get head object")
else: # Compute the MD5 sum normally
return '"{0}"'.format(module.md5(filename))
Expand All @@ -81,7 +77,7 @@ def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None)
parts = int(etag[1:-1].split('-')[1])
try:
return calculate_checksum_with_content(s3, parts, bucket, obj, version, content)
except (BotoCoreError, ClientError) as e:
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to get head object")
else: # Compute the MD5 sum normally
return '"{0}"'.format(md5(content).hexdigest())
Expand Down Expand Up @@ -130,38 +126,27 @@ def parse_ceph_endpoint(url):
return {"endpoint": url, "use_ssl": use_ssl}


def parse_default_endpoint(url, mode, encryption_mode, dualstack, sig_4):
    """Build boto3 connection parameters for a plain AWS S3 endpoint.

    :param url: the endpoint URL (may be None for the default AWS endpoint)
    :param mode: the module's operation mode (e.g. 'get', 'getstr', 'put')
    :param encryption_mode: server-side encryption mode (e.g. 'aws:kms')
    :param dualstack: whether to enable the IPv4/IPv6 dual-stack endpoint
    :param sig_4: whether Signature Version 4 was explicitly requested
    :return: dict with an 'endpoint' key and, when needed, a 'config' key
             holding a botocore Config object
    """
    result = {"endpoint": url}
    config = {}
    # SigV4 is required when explicitly requested for downloads, and always
    # for uploads that use aws:kms server-side encryption.
    if (mode in ('get', 'getstr') and sig_4) or (mode == "put" and encryption_mode == "aws:kms"):
        config["signature_version"] = "s3v4"
    if dualstack:
        config["s3"] = {"use_dualstack_endpoint": True}
    # Only attach a Config object when at least one override is set.
    if config:
        result["config"] = Config(**config)
    return result
def parse_s3_endpoint(options):
    """Inspect the module options and classify the configured endpoint.

    :param options: module parameters (uses 'endpoint_url' and 'ceph')
    :return: tuple (is_aws, endpoint_params) where is_aws is False for
             Ceph RGW and fakes3 endpoints, and endpoint_params is the
             dict of connection settings for that endpoint
    """
    url = options.get("endpoint_url")
    if options.get("ceph"):
        return False, parse_ceph_endpoint(url)
    if is_fakes3(url):
        return False, parse_fakes3_endpoint(url)
    # Anything else is treated as a genuine AWS endpoint.
    return True, {"endpoint": url}


def s3_conn_params(mode, encryption_mode, dualstack, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
    """Assemble the keyword arguments needed by boto3_conn() for an S3 client.

    Dispatches on the endpoint flavour (Ceph RGW, fakes3, or plain AWS) and
    merges the resulting endpoint settings into the base connection params.
    """
    params = dict(conn_type="client", resource="s3", region=location)
    params.update(aws_connect_kwargs)

    if ceph:
        endpoint_params = parse_ceph_endpoint(endpoint_url)
    elif is_fakes3(endpoint_url):
        endpoint_params = parse_fakes3_endpoint(endpoint_url)
    else:
        endpoint_params = parse_default_endpoint(endpoint_url, mode, encryption_mode, dualstack, sig_4)

    params.update(endpoint_params)
    return params


def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
    """Create and return a boto3 S3 client for the module.

    Pulls mode/encryption/dualstack settings from module.params, builds the
    connection parameters via s3_conn_params(), and opens the connection.
    """
    conn_params = s3_conn_params(
        module.params.get("mode"),
        module.params.get("encryption_mode"),
        module.params.get("dualstack"),
        aws_connect_kwargs,
        location,
        ceph,
        endpoint_url,
        sig_4,
    )
    return boto3_conn(module, **conn_params)
def s3_extra_params(options, sigv4=False):
    """Return extra keyword arguments for building an S3 client.

    :param options: module parameters (uses 'endpoint_url', 'ceph', 'dualstack')
    :param sigv4: force Signature Version 4 signing
    :return: dict of endpoint parameters, plus a 'config' dict when dualstack
             or SigV4 overrides are required (AWS endpoints only)
    """
    aws, extra_params = parse_s3_endpoint(options)
    if not aws:
        # Ceph RGW and fakes3 endpoints carry their own settings; no
        # AWS-specific config overrides apply.
        return extra_params
    dualstack = options.get("dualstack")
    if not dualstack and not sigv4:
        return extra_params
    config = {}
    if dualstack:
        # NOTE(review): the replaced parse_default_endpoint nested this as
        # config["s3"] = {"use_dualstack_endpoint": True}; confirm the flat
        # key here is what the client-construction code expects.
        config["use_dualstack_endpoint"] = True
    if sigv4:
        config["signature_version"] = "s3v4"
    extra_params["config"] = config
    return extra_params
129 changes: 43 additions & 86 deletions plugins/modules/s3_bucket.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,10 @@
choices: [ 'none', 'AES256', 'aws:kms' ]
type: str
encryption_key_id:
description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If
not specified then it will default to the AWS provided KMS key.
description:
- KMS master key ID to use for the default encryption.
- If not specified then it will default to the AWS provided KMS key.
- This parameter is only supported if I(encryption) is C(aws:kms).
type: str
bucket_key_enabled:
description:
Expand Down Expand Up @@ -154,10 +156,17 @@
type: bool
version_added: 3.1.0
default: True
dualstack:
description:
- Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
- Mutually exclusive with I(endpoint_url).
type: bool
default: false
version_added: 6.0.0
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.tags
- amazon.aws.boto3
Expand Down Expand Up @@ -348,22 +357,23 @@
pass # Handled by AnsibleAWSModule

from ansible.module_utils.basic import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict

from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params
from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict


def create_or_update_bucket(s3_client, module, location):
def create_or_update_bucket(s3_client, module):

policy = module.params.get("policy")
name = module.params.get("name")
Expand All @@ -379,6 +389,10 @@ def create_or_update_bucket(s3_client, module, location):
delete_object_ownership = module.params.get("delete_object_ownership")
object_ownership = module.params.get("object_ownership")
acl = module.params.get("acl")
# default to US Standard region,
# note: module.region will also try to pull a default out of the boto3 configs.
location = module.region or "us-east-1"

changed = False
result = {}

Expand Down Expand Up @@ -1046,38 +1060,6 @@ def destroy_bucket(s3_client, module):
module.exit_json(changed=True)


def is_fakes3(endpoint_url):
    """Return True if endpoint_url uses a fakes3:// or fakes3s:// scheme."""
    if endpoint_url is None:
        return False
    return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')


def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url):
    """Return a boto3 S3 client, handling Ceph RGW and fakes3 endpoints.

    :param module: the AnsibleAWSModule instance
    :param aws_connect_kwargs: extra connection keyword arguments
    :param location: the AWS region to connect to
    :param ceph: True when connecting to a Ceph RGW endpoint
    :param endpoint_url: optional explicit endpoint URL
    """
    if ceph:  # TODO - test this
        # Fix: do not shadow the boolean 'ceph' parameter with the parsed URL.
        parsed = urlparse(endpoint_url)
        params = dict(module=module, conn_type='client', resource='s3',
                      use_ssl=parsed.scheme == 'https',
                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
    elif is_fakes3(endpoint_url):
        fakes3 = urlparse(endpoint_url)
        # fakes3s is the TLS variant; default ports follow the scheme.
        if fakes3.scheme == 'fakes3s':
            protocol = "https"
            port = fakes3.port if fakes3.port is not None else 443
        else:
            protocol = "http"
            port = fakes3.port if fakes3.port is not None else 80
        params = dict(module=module, conn_type='client', resource='s3', region=location,
                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
    else:
        params = dict(module=module, conn_type='client', resource='s3',
                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
    return boto3_conn(**params)


def main():

argument_spec = dict(
Expand All @@ -1103,6 +1085,7 @@ def main():
delete_object_ownership=dict(type='bool', default=False),
acl=dict(type='str', choices=['private', 'public-read', 'public-read-write', 'authenticated-read']),
validate_bucket_name=dict(type='bool', default=True),
dualstack=dict(default=False, type="bool"),
)

required_by = dict(
Expand All @@ -1111,7 +1094,8 @@ def main():

mutually_exclusive = [
['public_access', 'delete_public_access'],
['delete_object_ownership', 'object_ownership']
['delete_object_ownership', 'object_ownership'],
["dualstack", "endpoint_url"],
]

required_if = [
Expand All @@ -1125,55 +1109,28 @@ def main():
mutually_exclusive=mutually_exclusive
)

region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
# Parameter validation
encryption = module.params.get("encryption")
encryption_key_id = module.params.get("encryption_key_id")
if encryption_key_id is not None and encryption != 'aws:kms':
module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

extra_params = s3_extra_params(module.params)
retry_decorator = AWSRetry.jittered_backoff(
max_delay=120,
catch_extra_error_codes=["NoSuchBucket", "OperationAborted"],
)
s3_client = module.client("s3", retry_decorator=retry_decorator, **extra_params)

if module.params.get('validate_bucket_name'):
err = validate_bucket_name(module.params["name"])
if err:
module.fail_json(msg=err)

if region in ('us-east-1', '', None):
# default to US Standard region
location = 'us-east-1'
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region

endpoint_url = module.params.get('endpoint_url')
ceph = module.params.get('ceph')

# Look at endpoint_url and tweak connection settings
# allow eucarc environment variables to be used if ansible vars aren't set
if not endpoint_url and 'S3_URL' in os.environ:
endpoint_url = os.environ['S3_URL']
module.deprecate(
"Support for the 'S3_URL' environment variable has been "
"deprecated. We recommend using the 'endpoint_url' module "
"parameter. Alternatively, the 'AWS_URL' environment variable can"
"be used instead.",
date='2024-12-01', collection_name='amazon.aws',
)

# if connecting to Ceph RGW, Walrus or fakes3
if endpoint_url:
for key in ['validate_certs', 'security_token', 'profile_name']:
aws_connect_kwargs.pop(key, None)
s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url)

if s3_client is None: # this should never happen
module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.')

state = module.params.get("state")
encryption = module.params.get("encryption")
encryption_key_id = module.params.get("encryption_key_id")

# Parameter validation
if encryption_key_id is not None and encryption != 'aws:kms':
module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

if state == 'present':
create_or_update_bucket(s3_client, module, location)
create_or_update_bucket(s3_client, module)
elif state == 'absent':
destroy_bucket(s3_client, module)

Expand Down
Loading

0 comments on commit 54071a6

Please sign in to comment.