Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rework s3_bucket to use ErrorHandler model #2478

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions changelogs/fragments/20250117-s3_bucket-error_handler.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
minor_changes:
- s3_bucket - migrated to use updated error handlers for better handling of non-AWS errors (https://github.com/ansible-collections/amazon.aws/pull/2478).
bugfixes:
- s3_bucket - fixed idempotency when setting bucket ACLs (https://github.com/ansible-collections/amazon.aws/pull/2478).
- s3_bucket - bucket ACLs now consistently returned (https://github.com/ansible-collections/amazon.aws/pull/2478).
11 changes: 0 additions & 11 deletions check_mypy.sh

This file was deleted.

103 changes: 103 additions & 0 deletions plugins/module_utils/_s3/common.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import functools

try:
# Beware, S3 is a "special" case, it sometimes catches botocore exceptions and
# re-raises them as boto3 exceptions.
import boto3
import botocore
except ImportError:
pass # Handled by the calling module


from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
from ansible_collections.amazon.aws.plugins.module_utils.errors import AWSErrorHandler
from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError

# Error codes returned by S3-compatible ("drop in") implementations that do not
# support a given feature; for read-only actions callers may want to treat these
# as ignorable rather than fatal (see S3ErrorHandler.common_error_handler below).
IGNORE_S3_DROP_IN_EXCEPTIONS = ["XNotImplemented", "NotImplemented", "AccessControlListNotSupported"]


class AnsibleS3Error(AnsibleAWSError):
    """Base exception for errors raised by the S3 module_utils helpers."""

    pass


class AnsibleS3Sigv4RequiredError(AnsibleS3Error):
    """Raised when an operation requires AWS Signature Version 4 authentication."""

    pass


class AnsibleS3PermissionsError(AnsibleS3Error):
    """Raised when an operation fails with a 403 / AccessDenied error."""

    pass


class AnsibleS3SupportError(AnsibleS3Error):
    """Raised when a feature is not supported by the (possibly non-AWS) cloud."""

    pass


class AnsibleS3RegionSupportError(AnsibleS3SupportError):
    """Raised when a feature is not supported in the requested region."""

    pass


class S3ErrorHandler(AWSErrorHandler):
    """AWSErrorHandler specialisation for S3 API calls.

    Translates boto3/botocore errors raised by S3 operations into the
    AnsibleS3Error exception hierarchy defined in this module.
    """

    # Base exception used by the inherited handlers and the fallback clauses below.
    _CUSTOM_EXCEPTION = AnsibleS3Error

    @classmethod
    def _is_missing(cls):
        # Matcher for error codes that mean the requested resource or
        # configuration (bucket, tag set, policy, encryption config, ...) does
        # not exist, rather than a hard failure.
        return is_boto3_error_code(
            [
                "404",
                "NoSuchTagSet",
                "NoSuchTagSetError",
                "ObjectLockConfigurationNotFoundError",
                "NoSuchBucketPolicy",
                "ServerSideEncryptionConfigurationNotFoundError",
                "NoSuchBucket",
                "NoSuchPublicAccessBlockConfiguration",
                "OwnershipControlsNotFoundError",
                "NoSuchOwnershipControls",
            ]
        )

    @classmethod
    def common_error_handler(cls, description):
        """Return a decorator that wraps an S3 call with error translation.

        :param description: human-readable description of the attempted action,
            used in the "Failed to {description} ..." messages of the raised
            exceptions.
        """

        def wrapper(func):
            # Decorators apply bottom-up: functools.wraps first, then the parent
            # class's generic handler wraps the S3-specific handler below — so
            # the S3-specific except clauses get first crack at any error, and
            # anything they don't match falls through to the parent handler.
            @super(S3ErrorHandler, cls).common_error_handler(description)
            @functools.wraps(func)
            def handler(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except is_boto3_error_code(["403", "AccessDenied"]) as e:
                    # FUTURE: there's a case to be made that this moves up into AWSErrorHandler
                    # for now, we'll handle this just for S3, but wait and see if it pops up in too
                    # many other places
                    raise AnsibleS3PermissionsError(
                        message=f"Failed to {description} (permission denied)", exception=e
                    ) from e
                except is_boto3_error_message(  # pylint: disable=duplicate-except
                    "require AWS Signature Version 4"
                ) as e:
                    raise AnsibleS3Sigv4RequiredError(
                        message=f"Failed to {description} (not supported by cloud)", exception=e
                    ) from e
                except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS) as e:  # pylint: disable=duplicate-except
                    # Unlike most of our modules, we attempt to handle non-AWS clouds. For read-only
                    # actions we sometimes need the ability to ignore unsupported features.
                    raise AnsibleS3SupportError(
                        message=f"Failed to {description} (not supported by cloud)", exception=e
                    ) from e
                except botocore.exceptions.EndpointConnectionError as e:
                    raise cls._CUSTOM_EXCEPTION(
                        message=f"Failed to {description} - Invalid endpoint provided", exception=e
                    ) from e
                except boto3.exceptions.Boto3Error as e:
                    raise cls._CUSTOM_EXCEPTION(message=f"Failed to {description}", exception=e) from e

            return handler

        return wrapper
122 changes: 122 additions & 0 deletions plugins/module_utils/_s3/transformations.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import copy
import typing

if typing.TYPE_CHECKING:
from typing import Optional

from ansible.module_utils.basic import to_text

from ansible_collections.amazon.aws.plugins.module_utils.transformation import boto3_resource_to_ansible_dict


def normalize_s3_bucket_versioning(versioning_status: Optional[dict]) -> Optional[dict]:
    """Convert a raw bucket versioning result into the Ansible-facing dict.

    Returns the input unchanged when it is empty/None.  In addition to the
    snake_cased keys, the legacy key spellings used historically by s3_bucket
    and s3_bucket_info are preserved.
    """
    if not versioning_status:
        return versioning_status

    status = versioning_status.get("Status", "Disabled")
    mfa_delete = versioning_status.get("MFADelete", "Disabled")

    normalized = typing.cast(dict, boto3_resource_to_ansible_dict(versioning_status))
    # Original s3_bucket format, no longer advertised but not officially deprecated
    normalized["Versioning"] = status
    normalized["MfaDelete"] = mfa_delete
    # Original s3_bucket_info format, no longer advertised but not officially deprecated
    normalized["Status"] = status
    normalized["MFADelete"] = mfa_delete
    return normalized


def normalize_s3_bucket_public_access(public_access_status: Optional[dict]) -> Optional[dict]:
    """Convert a raw public access block result into the Ansible-facing dict.

    Returns the input unchanged when it is empty/None.  The result exposes the
    snake_cased keys, the original CamelCase keys, and the full raw
    configuration under "PublicAccessBlockConfiguration".
    """
    if not public_access_status:
        return public_access_status

    snake_cased = typing.cast(dict, boto3_resource_to_ansible_dict(public_access_status))
    return {
        **snake_cased,
        "PublicAccessBlockConfiguration": copy.deepcopy(public_access_status),
        **public_access_status,
    }


def normalize_s3_bucket_acls(acls: Optional[dict]) -> Optional[dict]:
    """Extract the snake_cased grants from a raw bucket ACL result.

    Returns the input unchanged when it is empty/None.
    """
    if not acls:
        return acls

    ansible_dict = typing.cast(dict, boto3_resource_to_ansible_dict(acls))
    grants = ansible_dict["grants"]
    return typing.cast(dict, grants)


def _grantee_is_owner(grant, owner_id):
return grant.get("Grantee", {}).get("ID") == owner_id


def _grantee_is_public(grant):
return grant.get("Grantee", {}).get("URI") == "http://acs.amazonaws.com/groups/global/AllUsers"


def _grantee_is_authenticated(grant):
return grant.get("Grantee", {}).get("URI") == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"


def _acl_permissions(grants):
if not grants:
return []
return [grant.get("Permission") for grant in grants if grant]


def s3_acl_to_name(acl):
    """Map a raw bucket ACL onto the matching canned ACL name.

    Recognises "private", "public-read", "public-read-write" and
    "authenticated-read".  Returns None when the ACL is empty, malformed, or
    does not correspond to any templated (canned) ACL.
    """
    if not acl:
        return None

    try:
        grants = acl["Grants"]
        owner_id = acl["Owner"]["ID"]

        # Partition the grants by grantee type; anything left over is a
        # custom grant that no canned ACL can express.
        owner_grants = [g for g in grants if _grantee_is_owner(g, owner_id)]
        auth_grants = [g for g in grants if _grantee_is_authenticated(g)]
        public_grants = [g for g in grants if _grantee_is_public(g)]

        recognised = len(owner_grants) + len(auth_grants) + len(public_grants)
        if len(grants) > recognised:
            raise ValueError("Unrecognised Grantee")
        if public_grants and auth_grants:
            raise ValueError("Public ACLs and Authenticated User ACLs are only used alone in templated ACL")

        # Every canned ACL grants the owner FULL_CONTROL.
        if _acl_permissions(owner_grants) != ["FULL_CONTROL"]:
            raise ValueError("Owner doesn't have full control")
        if len(grants) == 1:
            return "private"

        if auth_grants:
            if _acl_permissions(auth_grants) == ["READ"]:
                return "authenticated-read"
            raise ValueError("Authenticated User ACLs don't match templated ACL")

        public_permissions = sorted(_acl_permissions(public_grants))
        if public_permissions == ["READ"]:
            return "public-read"
        if public_permissions == ["READ", "WRITE"]:
            return "public-read-write"

        raise ValueError("Public ACLs don't match templated ACL")

    except (KeyError, IndexError, ValueError):
        return None


def merge_tags(current_tags: dict, new_tags: dict, purge_tags: bool = True) -> dict:
    """Compare and merge two dicts of tags.

    :param current_tags: The current tags
    :param new_tags: The tags passed as a parameter
    :param purge_tags: Whether to remove current tags that aren't in new_tags
    :return: updated_tags: The updated dictionary of tags
    """
    # A None value means "leave tags alone"; the current tags win outright.
    if new_tags is None:
        return current_tags

    # Start from the existing tags only when purging is disabled.
    merged = {} if purge_tags else copy.deepcopy(current_tags)
    # Tags are always returned as text
    merged.update({to_text(key): to_text(value) for key, value in new_tags.items()})
    return merged
69 changes: 69 additions & 0 deletions plugins/module_utils/_s3/waiters.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-

# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from ..waiter import BaseWaiterFactory


class S3WaiterFactory(BaseWaiterFactory):
    """Waiter factory providing custom botocore-style waiters for S3 buckets."""

    @property
    def _waiter_model_data(self):
        """Waiter model definitions keyed by waiter name."""

        # JMESPath expression selecting every rule's BucketKeyEnabled flag
        # from a GetBucketEncryption response.
        bucket_key_enabled_path = "ServerSideEncryptionConfiguration.Rules[].BucketKeyEnabled"

        def status_acceptor(code):
            # Succeed when the response's HTTP status code matches.
            return {"state": "success", "matcher": "status", "expected": code}

        def path_acceptor(argument, expected, matcher="path"):
            # Succeed when the JMESPath expression yields the expected value.
            return {"state": "success", "matcher": matcher, "argument": argument, "expected": expected}

        return {
            "bucket_exists": {
                "operation": "HeadBucket",
                "delay": 5,
                "maxAttempts": 20,
                # NOTE(review): 301 and 403 are also treated as "exists" —
                # presumably redirects/permission errors still prove the bucket
                # is there; confirm against the callers' expectations.
                "acceptors": [status_acceptor(200), status_acceptor(301), status_acceptor(403)],
            },
            "bucket_not_exists": {
                "operation": "HeadBucket",
                "delay": 5,
                "maxAttempts": 60,
                "acceptors": [status_acceptor(404)],
            },
            "bucket_versioning_enabled": {
                "operation": "GetBucketVersioning",
                "delay": 8,
                "maxAttempts": 25,
                "acceptors": [path_acceptor("Status", "Enabled")],
            },
            "bucket_versioning_suspended": {
                "operation": "GetBucketVersioning",
                "delay": 8,
                "maxAttempts": 25,
                "acceptors": [path_acceptor("Status", "Suspended")],
            },
            "bucket_key_encryption_enabled": {
                "operation": "GetBucketEncryption",
                "delay": 5,
                "maxAttempts": 12,
                "acceptors": [path_acceptor(bucket_key_enabled_path, True, matcher="pathAll")],
            },
            "bucket_key_encryption_disabled": {
                "operation": "GetBucketEncryption",
                "delay": 5,
                "maxAttempts": 12,
                "acceptors": [path_acceptor(bucket_key_enabled_path, False, matcher="pathAll")],
            },
        }


# Module-level factory instance; importers use this shared object to build waiters.
waiter_factory = S3WaiterFactory()
Loading
Loading