Support v2019-12-12 for blob, file share, file datalake (#37)
* add new version

* upgrade version

* upgrade version

* refine

Co-authored-by: Ubuntu <zunli@zuhvm.etyrgwjlsqfeplvzbzef2qjagg.cbnx.internal.cloudapp.net>
Juliehzl and Ubuntu authored Aug 19, 2020
1 parent 9d43eb1 commit abfd2ce
Showing 158 changed files with 41,249 additions and 470 deletions.
9 changes: 9 additions & 0 deletions README.rst
@@ -17,6 +17,15 @@ Handles multi-API versions of Azure Storage Data Plane originally from https://g

Change Log
----------
0.4.1
+++++
* Add tags support for blob
* Add new api support for azure-multiapi-storagev2:
    - filedatalake
        - v2019-12-12
    - fileshare
        - v2019-12-12

0.4.0
+++++
* Add v2019-12-12 for azure.multiapi.storagev2.blob
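With the 0.4.1 layout above, both new data-plane surfaces are importable under their pinned API version. A minimal consumption sketch (hypothetical connection string; the client class names assume the vendored packages keep the upstream azure-storage-file-share and azure-storage-file-datalake names):

    from azure.multiapi.storagev2.fileshare.v2019_12_12 import ShareServiceClient
    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import DataLakeServiceClient

    # Placeholder connection string; replace with a real account's values.
    conn_str = "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net"

    # Each versioned sub-package is a self-contained copy of the matching track-2 SDK,
    # so a caller such as Azure CLI can pin the service API version explicitly.
    shares = ShareServiceClient.from_connection_string(conn_str)
    datalake = DataLakeServiceClient.from_connection_string(conn_str)
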
6 changes: 4 additions & 2 deletions azure/multiapi/storagev2/blob/v2019_12_12/__init__.py
@@ -52,11 +52,12 @@
CustomerProvidedEncryptionKey,
ContainerEncryptionScope,
BlobQueryError,
DelimitedJSON,
DelimitedJsonDialect,
DelimitedTextDialect,
ObjectReplicationPolicy,
ObjectReplicationRule
)
from ._list_blobs_helper import BlobPrefix

__version__ = VERSION

@@ -195,6 +196,7 @@ def download_blob_from_url(
'CorsRule',
'ContainerProperties',
'BlobProperties',
'BlobPrefix',
'FilteredBlob',
'LeaseProperties',
'ContentSettings',
@@ -215,7 +217,7 @@ def download_blob_from_url(
'PartialBatchErrorException',
'ContainerEncryptionScope',
'BlobQueryError',
'DelimitedJSON',
'DelimitedJsonDialect',
'DelimitedTextDialect',
'BlobQueryReader',
'ObjectReplicationPolicy',
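The rename from DelimitedJSON to DelimitedJsonDialect (and the new BlobPrefix export) tracks the blob quick-query surface of the upstream 12.4-series client. A hedged sketch of how the two dialect types are typically combined, assuming the vendored copy exposes the same BlobClient.query_blob API and keyword names (blob_format, output_format) as azure-storage-blob 12.4, with conn_str as a placeholder connection string:

    from azure.multiapi.storagev2.blob.v2019_12_12 import (
        BlobClient,
        DelimitedJsonDialect,
        DelimitedTextDialect,
    )

    blob = BlobClient.from_connection_string(
        conn_str, container_name="reports", blob_name="input.csv")

    # Describe the stored CSV, then ask the service to return matching rows as JSON lines.
    csv_input = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
    json_output = DelimitedJsonDialect(delimiter="\n")

    reader = blob.query_blob(
        "SELECT * from BlobStorage",
        blob_format=csv_input,
        output_format=json_output,
    )
    print(reader.readall())
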
257 changes: 243 additions & 14 deletions azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py

Large diffs are not rendered by default.

@@ -39,8 +39,8 @@
from ._shared.models import UserDelegationKey
from ._lease import BlobLeaseClient
from ._models import (
BlobProperties,
ContainerProperties,
BlobProperties,
PublicAccess,
BlobAnalyticsLogging,
Metrics,
78 changes: 70 additions & 8 deletions azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py
@@ -40,9 +40,8 @@
from ._models import ( # pylint: disable=unused-import
ContainerProperties,
BlobProperties,
BlobPropertiesPaged,
BlobType,
BlobPrefix)
BlobType)
from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged
from ._lease import BlobLeaseClient, get_access_conditions
from ._blob_client import BlobClient

@@ -783,6 +782,12 @@ def upload_blob(
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
e.g. "\"tagname\"='my tag'"
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
@@ -893,6 +898,12 @@ def delete_blob(
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
e.g. "\"tagname\"='my tag'"
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
@@ -952,6 +963,12 @@ def download_blob(self, blob, offset=None, length=None, **kwargs):
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
e.g. "\"tagname\"='my tag'"
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
@@ -998,6 +1015,9 @@ def _generate_delete_blobs_subrequest_options(
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags

# Construct parameters
timeout = kwargs.pop('timeout', None)
@@ -1027,6 +1047,8 @@ def _generate_delete_blobs_subrequest_options(
if if_none_match is not None:
header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access
"if_none_match", if_none_match, 'str')
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access

return query_parameters, header_parameters

@@ -1039,6 +1061,7 @@ def _generate_delete_blobs_options(self,
delete_snapshots = kwargs.pop('delete_snapshots', None)
if_modified_since = kwargs.pop('if_modified_since', None)
if_unmodified_since = kwargs.pop('if_unmodified_since', None)
if_tags_match_condition = kwargs.pop('if_tags_match_condition', None)
kwargs.update({'raise_on_any_failure': raise_on_any_failure,
'sas': self._query_str.replace('?', '&'),
'timeout': '&timeout=' + str(timeout) if timeout else ""
@@ -1057,18 +1080,21 @@
if_modified_since=if_modified_since or blob.get('if_modified_since'),
if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'),
etag=blob.get('etag'),
if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'),
match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag')
else None,
timeout=blob.get('timeout'),
)
query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options)
except AttributeError:
query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(
options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access
delete_snapshots=delete_snapshots,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since
if_unmodified_since=if_unmodified_since,
if_tags_match_condition=if_tags_match_condition
)

query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options)

req = HttpRequest(
"DELETE",
"/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
@@ -1113,6 +1139,8 @@ def delete_blobs(self, *blobs, **kwargs):
key: 'etag', value type: str
match the etag or not:
key: 'match_condition', value type: MatchConditions
tags match condition:
key: 'if_tags_match_condition', value type: str
lease:
key: 'lease_id', value type: Union[str, LeaseClient]
timeout for subrequest:
@@ -1135,6 +1163,12 @@ def delete_blobs(self, *blobs, **kwargs):
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
e.g. "\"tagname\"='my tag'"
.. versionadded:: 12.4.0
:keyword bool raise_on_any_failure:
This is a boolean param which defaults to True. When this is set, an exception
is raised even if there is a single operation failure.
@@ -1152,19 +1186,25 @@ def delete_blobs(self, *blobs, **kwargs):
:dedent: 8
:caption: Deleting multiple blobs.
"""
if len(blobs) == 0:
return iter(list())

reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)

return self._batch_send(*reqs, **options)

def _generate_set_tiers_subrequest_options(
self, tier, rehydrate_priority=None, lease_access_conditions=None, **kwargs
self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs
):
"""This code is a copy from _generated.
Once Autorest is able to provide request preparation this code should be removed.
"""
if not tier:
raise ValueError("A blob tier must be specified")
if snapshot and version_id:
raise ValueError("Snapshot and version_id cannot be set at the same time")
if_tags = kwargs.pop('if_tags', None)

lease_id = None
if lease_access_conditions is not None:
@@ -1174,6 +1214,10 @@ def _generate_set_tiers_subrequest_options(
timeout = kwargs.pop('timeout', None)
# Construct parameters
query_parameters = {}
if snapshot is not None:
query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access
if version_id is not None:
query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access
if timeout is not None:
query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access
query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call
@@ -1186,6 +1230,8 @@ def _generate_set_tiers_subrequest_options(
"rehydrate_priority", rehydrate_priority, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access

return query_parameters, header_parameters

@@ -1197,6 +1243,7 @@ def _generate_set_tiers_options(self,
timeout = kwargs.pop('timeout', None)
raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
rehydrate_priority = kwargs.pop('rehydrate_priority', None)
if_tags = kwargs.pop('if_tags_match_condition', None)
kwargs.update({'raise_on_any_failure': raise_on_any_failure,
'sas': self._query_str.replace('?', '&'),
'timeout': '&timeout=' + str(timeout) if timeout else ""
@@ -1211,13 +1258,16 @@
tier = blob_tier or blob.get('blob_tier')
query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
tier=tier,
snapshot=blob.get('snapshot'),
version_id=blob.get('version_id'),
rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'),
lease_access_conditions=blob.get('lease_id'),
if_tags=if_tags or blob.get('if_tags_match_condition'),
timeout=timeout or blob.get('timeout')
)
except AttributeError:
query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
blob_tier, rehydrate_priority=rehydrate_priority)
blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags)

req = HttpRequest(
"PUT",
@@ -1270,12 +1320,24 @@ def set_standard_blob_tier_blobs(
key: 'rehydrate_priority', value type: RehydratePriority
lease:
key: 'lease_id', value type: Union[str, LeaseClient]
snapshot:
key: "snapshost", value type: str
version id:
key: "version_id", value type: str
tags match condition:
key: 'if_tags_match_condition', value type: str
timeout for subrequest:
key: 'timeout', value type: int
:type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
:keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
Indicates the priority with which to rehydrate an archived blob
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
e.g. "\"tagname\"='my tag'"
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword bool raise_on_any_failure:
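Taken together, the container-client changes above let the batch helpers honor blob tags and target snapshots or versions. A rough usage sketch based on the docstrings in this diff (placeholder connection string, blob names, and snapshot timestamp; not an official sample):

    from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerClient

    container = ContainerClient.from_connection_string(conn_str, container_name="logs")

    # Batch delete, but only touch blobs whose tags satisfy the SQL-style predicate.
    container.delete_blobs(
        "app-1.log",
        "app-2.log",
        if_tags_match_condition="\"status\"='processed'",
    )

    # Per-blob dict entries override batch-level settings; note that snapshot and
    # version_id are mutually exclusive for a single entry.
    container.set_standard_blob_tier_blobs(
        "Archive",
        {"name": "app-1.log", "snapshot": "2020-08-19T00:00:00.0000000Z"},
        {"name": "app-2.log", "if_tags_match_condition": "\"status\"='processed'"},
    )
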
67 changes: 58 additions & 9 deletions azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py
@@ -9,8 +9,11 @@
TYPE_CHECKING
)

from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties
from ._shared.models import get_enum_value

from ._shared.response_handlers import deserialize_metadata
from ._models import BlobProperties, ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule

if TYPE_CHECKING:
@@ -20,7 +23,7 @@
def deserialize_blob_properties(response, obj, headers):
blob_properties = BlobProperties(
metadata=deserialize_metadata(response, obj, headers),
object_replication_source_properties=deserialize_ors_policies(response),
object_replication_source_properties=deserialize_ors_policies(response.headers),
**headers
)
if 'Content-Range' in headers:
@@ -31,20 +34,21 @@ def deserialize_blob_properties(response, obj, headers):
return blob_properties


def deserialize_ors_policies(response):
def deserialize_ors_policies(policy_dictionary):

if policy_dictionary is None:
return None
# For source blobs (blobs that have policy ids and rule ids applied to them),
# the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
# The value of this header is the status of the replication.
or_policy_status_headers = {key: val for key, val in response.headers.items()
if key.startswith('x-ms-or') and key != 'x-ms-or-policy-id'}
or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
if 'or-' in key and key != 'x-ms-or-policy-id'}

parsed_result = {}

# all the ors headers have the same prefix, so we note down its length here to avoid recalculating it repeatedly
header_prefix_length = len('x-ms-or-')

for key, val in or_policy_status_headers.items():
policy_and_rule_ids = key[header_prefix_length:].split('_')
# list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
policy_and_rule_ids = key.split('or-')[1].split('_')
policy_id = policy_and_rule_ids[0]
rule_id = policy_and_rule_ids[1]

@@ -106,3 +110,48 @@ def service_properties_deserialize(generated):
'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access
'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access
}


def get_blob_properties_from_generated_code(generated):
blob = BlobProperties()
blob.name = generated.name
blob_type = get_enum_value(generated.properties.blob_type)
blob.blob_type = BlobType(blob_type) if blob_type else None
blob.etag = generated.properties.etag
blob.deleted = generated.deleted
blob.snapshot = generated.snapshot
blob.is_append_blob_sealed = generated.properties.is_sealed
blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access
blob.last_modified = generated.properties.last_modified
blob.creation_time = generated.properties.creation_time
blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access
blob.size = generated.properties.content_length
blob.page_blob_sequence_number = generated.properties.blob_sequence_number
blob.server_encrypted = generated.properties.server_encrypted
blob.encryption_scope = generated.properties.encryption_scope
blob.deleted_time = generated.properties.deleted_time
blob.remaining_retention_days = generated.properties.remaining_retention_days
blob.blob_tier = generated.properties.access_tier
blob.rehydrate_priority = generated.properties.rehydrate_priority
blob.blob_tier_inferred = generated.properties.access_tier_inferred
blob.archive_status = generated.properties.archive_status
blob.blob_tier_change_time = generated.properties.access_tier_change_time
blob.version_id = generated.version_id
blob.is_current_version = generated.is_current_version
blob.tag_count = generated.properties.tag_count
blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access
blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
return blob


def parse_tags(generated_tags):
# type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
"""Deserialize a list of BlobTag objects into a dict.
"""
if generated_tags:
tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
return tag_dict
return None
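
To make the header parsing in deserialize_ors_policies concrete, here is a standalone sketch of just the splitting logic (hypothetical policy and rule IDs; it returns a plain dict, whereas the vendored helper appears to build ObjectReplicationPolicy/ObjectReplicationRule objects in the part of the function this hunk truncates):

    def split_or_status_headers(headers):
        """Map 'x-ms-or-<policy>_<rule>' (or 'or-<policy>_<rule>') headers to statuses."""
        statuses = {key: val for key, val in headers.items()
                    if 'or-' in key and key != 'x-ms-or-policy-id'}
        result = {}
        for key, val in statuses.items():
            # get_blob_properties surfaces 'x-ms-or-...' while list_blobs surfaces 'or-...',
            # so splitting on 'or-' handles both shapes.
            policy_id, rule_id = key.split('or-')[1].split('_')
            result.setdefault(policy_id, {})[rule_id] = val
        return result

    print(split_or_status_headers({
        'x-ms-or-b9aebfd6_1a9c8f76': 'Complete',
        'x-ms-or-policy-id': 'b9aebfd6',
    }))
    # {'b9aebfd6': {'1a9c8f76': 'Complete'}}
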
2 changes: 1 addition & 1 deletion azure/multiapi/storagev2/blob/v2019_12_12/_download.py
@@ -258,7 +258,7 @@ class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attr
The properties of the blob being downloaded. If only a range of the data is being
downloaded, this will be reflected in the properties.
:ivar int size:
The size of the total data in the stream. This will be the byte range if speficied,
The size of the total data in the stream. This will be the byte range if specified,
The size of the total data in the stream. This will be the byte range if specified,
otherwise the total size of the blob.
"""
